-rw-r--r--  Documentation/RCU/RTFP.txt         1784
-rw-r--r--  Documentation/RCU/checklist.txt      14
-rw-r--r--  Documentation/RCU/stallwarn.txt      87
-rw-r--r--  Documentation/RCU/torture.txt        33
-rw-r--r--  Documentation/RCU/trace.txt          36
-rw-r--r--  arch/s390/kernel/irq.c                9
-rw-r--r--  drivers/target/tcm_fc/tfc_sess.c     12
-rw-r--r--  include/linux/rcupdate.h             83
-rw-r--r--  include/linux/rcutiny.h              10
-rw-r--r--  include/linux/rcutree.h              19
-rw-r--r--  include/linux/sched.h                 3
-rw-r--r--  include/linux/srcu.h                 15
-rw-r--r--  include/trace/events/rcu.h           63
-rw-r--r--  init/Kconfig                          9
-rw-r--r--  kernel/lockdep.c                      8
-rw-r--r--  kernel/rcu.h                         26
-rw-r--r--  kernel/rcupdate.c                     5
-rw-r--r--  kernel/rcutiny.c                     26
-rw-r--r--  kernel/rcutiny_plugin.h              77
-rw-r--r--  kernel/rcutorture.c                  91
-rw-r--r--  kernel/rcutree.c                    507
-rw-r--r--  kernel/rcutree.h                     27
-rw-r--r--  kernel/rcutree_plugin.h             450
-rw-r--r--  kernel/rcutree_trace.c               12
-rw-r--r--  kernel/srcu.c                        33
-rw-r--r--  lib/Kconfig.debug                    24
-rw-r--r--  net/ipv4/cipso_ipv4.c                11
-rw-r--r--  net/ipv4/ip_sockglue.c                7
-rw-r--r--  net/mac80211/mesh_pathtbl.c           8
29 files changed, 2906 insertions, 583 deletions
diff --git a/Documentation/RCU/RTFP.txt b/Documentation/RCU/RTFP.txt
index c43460dade0f..7c1dfb19fc40 100644
--- a/Documentation/RCU/RTFP.txt
+++ b/Documentation/RCU/RTFP.txt
@@ -1,9 +1,10 @@
-Read the F-ing Papers!
+Read the Fscking Papers!


 This document describes RCU-related publications, and is followed by
 the corresponding bibtex entries.  A number of the publications may
-be found at http://www.rdrop.com/users/paulmck/RCU/.
+be found at http://www.rdrop.com/users/paulmck/RCU/.  For others, browsers
+and search engines will usually find what you are looking for.

 The first thing resembling RCU was published in 1980, when Kung and Lehman
 [Kung80] recommended use of a garbage collector to defer destruction
@@ -160,7 +161,26 @@ which Mathieu Desnoyers is now maintaining [MathieuDesnoyers2009URCU]
 [MathieuDesnoyersPhD].  TINY_RCU [PaulEMcKenney2009BloatWatchRCU] made
 its appearance, as did expedited RCU [PaulEMcKenney2009expeditedRCU].
 The problem of resizeable RCU-protected hash tables may now be on a path
-to a solution [JoshTriplett2009RPHash].
+to a solution [JoshTriplett2009RPHash].  A few academic researchers are now
+using RCU to solve their parallel problems [HariKannan2009DynamicAnalysisRCU].
+
+2010 produced a simpler preemptible-RCU implementation
+based on TREE_RCU [PaulEMcKenney2010SimpleOptRCU], lockdep-RCU
+[PaulEMcKenney2010LockdepRCU], another resizeable RCU-protected hash
+table [HerbertXu2010RCUResizeHash] (this one consuming more memory,
+but allowing arbitrary changes in hash function, as required for DoS
+avoidance in the networking code), realization of the 2009 RCU-protected
+hash table with atomic node move [JoshTriplett2010RPHash], and an update
+on the RCU API [PaulEMcKenney2010RCUAPI].
+
+2011 marked the inclusion of Nick Piggin's fully lockless dentry search
+[LinusTorvalds2011Linux2:6:38:rc1:NPigginVFS], an RCU-protected red-black
+tree using software transactional memory to protect concurrent updates
+(strange, but true!) [PhilHoward2011RCUTMRBTree], yet another variant of
+RCU-protected resizeable hash tables [Triplett:2011:RPHash], the 3.0 RCU
+trainwreck [PaulEMcKenney2011RCU3.0trainwreck], and Neil Brown's "Meet the
+Lockers" LWN article [NeilBrown2011MeetTheLockers].
+

 Bibtex Entries

@@ -173,6 +193,14 @@ Bibtex Entries
 ,volume="5"
 ,number="3"
 ,pages="354-382"
+,note="Available:
+\url{http://portal.acm.org/citation.cfm?id=320619&dl=GUIDE,}
+[Viewed December 3, 2007]"
+,annotation={
+	Use garbage collector to clean up data after everyone is done with it.
+	.
+	Oldest use of something vaguely resembling RCU that I have found.
+}
 }

 @techreport{Manber82
@@ -184,6 +212,31 @@ Bibtex Entries
 ,number="82-01-01"
 ,month="January"
 ,pages="28"
+,annotation={
+	.
+	Superseded by Manber84.
+	.
+	Describes concurrent AVL tree implementation.  Uses a
+	garbage-collection mechanism to handle concurrent use and deletion
+	of nodes in the tree, but lacks the summary-of-execution-history
+	concept of read-copy locking.
+	.
+	Keeps full list of processes that were active when a given
+	node was to be deleted, and waits until all such processes have
+	-terminated- before allowing this node to be reused.  This is
+	not described in great detail -- one could imagine using process
+	IDs for this if the ID space was large enough that overlapping
+	never occurred.
+	.
+	This restriction makes this algorithm unsuitable for use in
+	systems comprised of long-lived processes.  It also produces
+	completely unacceptable overhead in systems with large numbers
+	of processes.  Finally, it is specific to AVL trees.
+	.
+	Cites Kung80, so not an independent invention, but the first
+	RCU-like usage that does not rely on an automatic garbage
+	collector.
+}
 }

 @article{Manber84
@@ -195,6 +248,74 @@ Bibtex Entries
 ,volume="9"
 ,number="3"
 ,pages="439-455"
+,annotation={
+	Describes concurrent AVL tree implementation.  Uses a
+	garbage-collection mechanism to handle concurrent use and deletion
+	of nodes in the tree, but lacks the summary-of-execution-history
+	concept of read-copy locking.
+	.
+	Keeps full list of processes that were active when a given
+	node was to be deleted, and waits until all such processes have
+	-terminated- before allowing this node to be reused.  This is
+	not described in great detail -- one could imagine using process
+	IDs for this if the ID space was large enough that overlapping
+	never occurred.
+	.
+	This restriction makes this algorithm unsuitable for use in
+	systems comprised of long-lived processes.  It also produces
+	completely unacceptable overhead in systems with large numbers
+	of processes.  Finally, it is specific to AVL trees.
+}
+}
+
+@Conference{RichardRashid87a
+,Author="Richard Rashid and Avadis Tevanian and Michael Young and
+David Golub and Robert Baron and David Black and William Bolosky and
+Jonathan Chew"
+,Title="Machine-Independent Virtual Memory Management for Paged
+Uniprocessor and Multiprocessor Architectures"
+,Booktitle="{2\textsuperscript{nd} Symposium on Architectural Support
+for Programming Languages and Operating Systems}"
+,Publisher="Association for Computing Machinery"
+,Month="October"
+,Year="1987"
+,pages="31-39"
+,Address="Palo Alto, CA"
+,note="Available:
+\url{http://www.cse.ucsc.edu/~randal/221/rashid-machvm.pdf}
+[Viewed February 17, 2005]"
+,annotation={
+	Describes lazy TLB flush, where one waits for each CPU to pass
+	through a scheduling-clock interrupt before reusing a given range
+	of virtual addresses.  Does not describe how one determines that
+	all CPUs have in fact taken such an interrupt, though there is
+	no shortage of straightforward methods for accomplishing this.
+	.
+	Note that it does not make sense to just wait a fixed amount of
+	time, since a given CPU might have interrupts disabled for an
+	extended amount of time.
+}
+}
+
+@article{BarbaraLiskov1988ArgusCACM
+,author = {Barbara Liskov}
+,title = {Distributed programming in {Argus}}
+,journal = {Commun. ACM}
+,volume = {31}
+,number = {3}
+,year = {1988}
+,issn = {0001-0782}
+,pages = {300--312}
+,doi = {http://doi.acm.org/10.1145/42392.42399}
+,publisher = {ACM}
+,address = {New York, NY, USA}
+,annotation={
+	At the top of page 307: "Conflicts with deposits and withdrawals
+	are necessary if the reported total is to be up to date.  They
+	could be avoided by having total return a sum that is slightly
+	out of date."  Relies on semantics -- approximate numerical
+	values sometimes OK.
+}
 }

 @techreport{Hennessy89
@@ -216,6 +337,13 @@ Bibtex Entries
 ,year="1990"
 ,number="CS-TR-2222.1"
 ,month="June"
+,annotation={
+	Concurrent access to skip lists.  Has both weak and strong search.
+	Uses concept of ``garbage queue'', but has no real way of cleaning
+	the garbage efficiently.
+	.
+	Appears to be an independent invention of an RCU-like mechanism.
+}
 }

 @Book{Adams91
@@ -223,20 +351,15 @@ Bibtex Entries
 ,title="Concurrent Programming, Principles, and Practices"
 ,Publisher="Benjamin Cummins"
 ,Year="1991"
+,annotation={
+	Has a few paragraphs describing ``chaotic relaxation'', a
+	numerical analysis technique that allows multiprocessors to
+	avoid synchronization overhead by using possibly-stale data.
+	.
+	Seems like this is descended from yet another independent
+	invention of RCU-like function -- but this is restricted
+	in that reclamation is not necessary.
 }
-
-@phdthesis{HMassalinPhD
-,author="H. Massalin"
-,title="Synthesis: An Efficient Implementation of Fundamental Operating
-System Services"
-,school="Columbia University"
-,address="New York, NY"
-,year="1992"
-,annotation="
-	Mondo optimizing compiler.
-	Wait-free stuff.
-	Good advice: defer work to avoid synchronization.
-"
 }

 @unpublished{Jacobson93
@@ -244,7 +367,13 @@ System Services"
 ,title="Avoid Read-Side Locking Via Delayed Free"
 ,year="1993"
 ,month="September"
-,note="Verbal discussion"
+,note="private communication"
+,annotation={
+	Use fixed time delay to approximate grace period.  Very simple,
+	but subject to random memory corruption under heavy load.
+	.
+	Independent invention of RCU-like mechanism.
+}
 }

 @Conference{AjuJohn95
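
For readers who have not seen it, the fixed-delay scheme that the Jacobson93
annotation warns about is easy to sketch.  The following userspace C11
illustration is editorial only (invented names, not from this patch or any
kernel code): the updater guesses that one second covers all readers, and a
reader delayed longer than that is left holding freed memory.

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdlib.h>
	#include <unistd.h>

	struct item { int value; };

	static _Atomic(struct item *) global_item;

	static void *reader(void *arg)
	{
		for (int i = 0; i < 1000; i++) {
			struct item *p = atomic_load(&global_item);

			/* A reader preempted here for longer than the
			 * updater's fixed delay reads freed memory --
			 * the "random memory corruption under heavy
			 * load" noted in the annotation. */
			(void)p->value;
		}
		return NULL;
	}

	static void update(int value)
	{
		struct item *newp = malloc(sizeof(*newp));
		struct item *oldp;

		newp->value = value;
		oldp = atomic_exchange(&global_item, newp);
		sleep(1);	/* approximate a grace period by waiting... */
		free(oldp);	/* ...and hope every reader has moved on */
	}

	int main(void)
	{
		struct item *first = malloc(sizeof(*first));
		pthread_t tid;

		first->value = 0;
		atomic_store(&global_item, first);
		pthread_create(&tid, NULL, reader, NULL);
		update(42);
		pthread_join(tid, NULL);
		return 0;
	}

Build with "gcc -pthread"; real RCU replaces the sleep() with a guarantee
that all pre-existing readers have finished.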
@@ -256,6 +385,17 @@ System Services"
 ,Year="1995"
 ,pages="11-23"
 ,Address="New Orleans, LA"
+,note="Available:
+\url{https://www.usenix.org/publications/library/proceedings/neworl/full_papers/john.a}
+[Viewed October 1, 2010]"
+,annotation={
+	Age vnodes out of the cache, and have a fixed time set by a kernel
+	parameter.  Not clear that all races were in fact correctly handled.
+	Used a 20-minute time by default, which would most definitely not
+	be suitable during DoS attacks or virus scans.
+	.
+	Apparently independent invention of RCU-like mechanism.
+}
 }

 @conference{Pu95a,
@@ -301,31 +441,47 @@ Utilizing Execution History and Thread Monitoring"
 ,institution="US Patent and Trademark Office"
 ,address="Washington, DC"
 ,year="1995"
-,number="US Patent 5,442,758 (contributed under GPL)"
+,number="US Patent 5,442,758"
 ,month="August"
+,annotation={
+	Describes the parallel RCU infrastructure.  Includes NUMA aspect
+	(structure of bitmap can reflect bus structure of computer system).
+	.
+	Another independent invention of an RCU-like mechanism, but the
+	"real" RCU this time!
+}
 }

 @techreport{Slingwine97
 ,author="John D. Slingwine and Paul E. McKenney"
-,title="Method for maintaining data coherency using thread
-activity summaries in a multicomputer system"
+,title="Method for Maintaining Data Coherency Using Thread Activity
+Summaries in a Multicomputer System"
 ,institution="US Patent and Trademark Office"
 ,address="Washington, DC"
 ,year="1997"
-,number="US Patent 5,608,893 (contributed under GPL)"
+,number="US Patent 5,608,893"
 ,month="March"
+,pages="19"
+,annotation={
+	Describes use of RCU to synchronize data between a pair of
+	SMP/NUMA computer systems.
+}
 }

 @techreport{Slingwine98
 ,author="John D. Slingwine and Paul E. McKenney"
-,title="Apparatus and method for achieving reduced overhead
-mutual exclusion and maintaining coherency in a multiprocessor
-system utilizing execution history and thread monitoring"
+,title="Apparatus and Method for Achieving Reduced Overhead Mutual
+Exclusion and Maintaining Coherency in a Multiprocessor System
+Utilizing Execution History and Thread Monitoring"
 ,institution="US Patent and Trademark Office"
 ,address="Washington, DC"
 ,year="1998"
-,number="US Patent 5,727,209 (contributed under GPL)"
+,number="US Patent 5,727,209"
 ,month="March"
+,annotation={
+	Describes doing an atomic update by copying the data item and
+	then substituting it into the data structure.
+}
 }

 @Conference{McKenney98
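
The copy-then-substitute update that the Slingwine98 annotation describes is
the "copy" and "update" of read-copy update.  A minimal editorial sketch in
C11 (invented names; in the kernel the substitution would be done with
rcu_assign_pointer() and the free deferred for a grace period):

	#include <stdatomic.h>
	#include <stdlib.h>

	struct config { int threshold; int limit; };

	static _Atomic(struct config *) cur_config;

	/* Readers load the pointer once and then see either the complete
	 * old copy or the complete new copy, never a half-updated one. */
	static struct config *config_set_limit(int limit)
	{
		struct config *oldc = atomic_load(&cur_config);
		struct config *newc = malloc(sizeof(*newc));

		*newc = *oldc;		/* copy... */
		newc->limit = limit;	/* ...modify the copy... */
		atomic_store_explicit(&cur_config, newc,
				      memory_order_release); /* ...substitute */
		return oldc;	/* must not be freed until readers are done */
	}

	int main(void)
	{
		struct config *init = malloc(sizeof(*init));

		init->threshold = 10;
		init->limit = 100;
		atomic_store(&cur_config, init);
		/* Immediate free is safe here only because this demo is
		 * single-threaded; concurrent readers need a grace period. */
		free(config_set_limit(200));
		return 0;
	}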
@@ -337,6 +493,15 @@ Problems"
 ,Year="1998"
 ,pages="509-518"
 ,Address="Las Vegas, NV"
+,note="Available:
+\url{http://www.rdrop.com/users/paulmck/RCU/rclockpdcsproof.pdf}
+[Viewed December 3, 2007]"
+,annotation={
+	Describes and analyzes RCU mechanism in DYNIX/ptx.  Describes
+	application to linked list update and log-buffer flushing.
+	Defines 'quiescent state'.  Includes both measured and analytic
+	evaluation.
+}
 }

 @Conference{Gamsa99
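
The 'quiescent state' that McKenney98 defines is the point at which a thread
or CPU is guaranteed to hold no read-side references.  A toy editorial
illustration (invented names, nothing like the DYNIX/ptx or Linux code):
each reader bumps a counter whenever it is outside a read-side section, and
a grace period simply waits for every counter to advance past a snapshot.

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	#define NREADERS 2

	static atomic_ulong qs_count[NREADERS]; /* bumped when quiescent */
	static atomic_int stop;

	static void *reader(void *arg)
	{
		long id = (long)arg;

		while (!atomic_load(&stop)) {
			/* read-side critical section would go here */
			atomic_fetch_add(&qs_count[id], 1); /* quiescent */
		}
		return NULL;
	}

	/* Grace period: every reader passes a quiescent state, so any
	 * read-side section in flight at the snapshot has completed. */
	static void toy_synchronize(void)
	{
		unsigned long snap[NREADERS];

		for (int i = 0; i < NREADERS; i++)
			snap[i] = atomic_load(&qs_count[i]);
		for (int i = 0; i < NREADERS; i++)
			while (atomic_load(&qs_count[i]) == snap[i])
				;	/* spin until reader i goes quiescent */
	}

	int main(void)
	{
		pthread_t tid[NREADERS];

		for (long i = 0; i < NREADERS; i++)
			pthread_create(&tid[i], NULL, reader, (void *)i);
		toy_synchronize();  /* old data could be freed after this */
		atomic_store(&stop, 1);
		for (int i = 0; i < NREADERS; i++)
			pthread_join(tid[i], NULL);
		puts("grace period elapsed");
		return 0;
	}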
@@ -349,18 +514,76 @@ Operating System Design and Implementation}"
 ,Year="1999"
 ,pages="87-100"
 ,Address="New Orleans, LA"
+,note="Available:
+\url{http://www.usenix.org/events/osdi99/full_papers/gamsa/gamsa.pdf}
+[Viewed August 30, 2006]"
+,annotation={
+	Use of RCU-like facility in K42/Tornado.  Another independent
+	invention of RCU.
+	See especially pages 7-9 (Section 5).
+}
+}
+
+@unpublished{RustyRussell2000a
+,Author="Rusty Russell"
+,Title="Re: modular net drivers"
+,month="June"
+,year="2000"
+,day="23"
+,note="Available:
+\url{http://oss.sgi.com/projects/netdev/archive/2000-06/msg00250.html}
+[Viewed April 10, 2006]"
+,annotation={
+	Proto-RCU proposal from Phil Rumpf and Rusty Russell.
+	Yet another independent invention of RCU.
+	Outline of algorithm to unload modules...
+	.
+	Appeared on net-dev mailing list.
+}
+}
+
+@unpublished{RustyRussell2000b
+,Author="Rusty Russell"
+,Title="Re: modular net drivers"
+,month="June"
+,year="2000"
+,day="24"
+,note="Available:
+\url{http://oss.sgi.com/projects/netdev/archive/2000-06/msg00254.html}
+[Viewed April 10, 2006]"
+,annotation={
+	Proto-RCU proposal from Phil Rumpf and Rusty Russell.
+	.
+	Appeared on net-dev mailing list.
+}
+}
+
+@unpublished{McKenney01b
+,Author="Paul E. McKenney and Dipankar Sarma"
+,Title="Read-Copy Update Mutual Exclusion in {Linux}"
+,month="February"
+,year="2001"
+,note="Available:
+\url{http://lse.sourceforge.net/locking/rcu/rcupdate_doc.html}
+[Viewed October 18, 2004]"
+,annotation={
+	Prototypical Linux documentation for RCU.
+}
 }

 @techreport{Slingwine01
 ,author="John D. Slingwine and Paul E. McKenney"
-,title="Apparatus and method for achieving reduced overhead
-mutual exclusion and maintaining coherency in a multiprocessor
-system utilizing execution history and thread monitoring"
+,title="Apparatus and Method for Achieving Reduced Overhead Mutual
+Exclusion and Maintaining Coherency in a Multiprocessor System
+Utilizing Execution History and Thread Monitoring"
 ,institution="US Patent and Trademark Office"
 ,address="Washington, DC"
 ,year="2001"
-,number="US Patent 5,219,690 (contributed under GPL)"
+,number="US Patent 6,219,690"
 ,month="April"
+,annotation={
+	'Change in mode' aspect of RCU.  Can be thought of as a lazy barrier.
+}
 }

 @Conference{McKenney01a
@@ -372,14 +595,61 @@ Orran Krieger and Rusty Russell and Dipankar Sarma and Maneesh Soni"
 ,Year="2001"
 ,note="Available:
 \url{http://www.linuxsymposium.org/2001/abstracts/readcopy.php}
-\url{http://www.rdrop.com/users/paulmck/rclock/rclock_OLS.2001.05.01c.pdf}
+\url{http://www.rdrop.com/users/paulmck/RCU/rclock_OLS.2001.05.01c.pdf}
 [Viewed June 23, 2004]"
-annotation="
-Described RCU, and presented some patches implementing and using it in
-the Linux kernel.
+,annotation={
+	Described RCU, and presented some patches implementing and using
+	it in the Linux kernel.
+}
+}
+
+@unpublished{McKenney01f
+,Author="Paul E. McKenney"
+,Title="{RFC:} patch to allow lock-free traversal of lists with insertion"
+,month="October"
+,year="2001"
+,note="Available:
+\url{http://marc.theaimsgroup.com/?l=linux-kernel&m=100259266316456&w=2}
+[Viewed June 23, 2004]"
+,annotation="
+	Memory-barrier and Alpha thread.  100 messages, not too bad...
+"
+}
+
+@unpublished{Spraul01
+,Author="Manfred Spraul"
+,Title="Re: {RFC:} patch to allow lock-free traversal of lists with insertion"
+,month="October"
+,year="2001"
+,note="Available:
+\url{http://marc.theaimsgroup.com/?l=linux-kernel&m=100264675012867&w=2}
+[Viewed June 23, 2004]"
+,annotation="
+	Suggested burying memory barriers in Linux's list-manipulation
+	primitives.
 "
 }

+@unpublished{LinusTorvalds2001a
+,Author="Linus Torvalds"
+,Title="{Re:} {[Lse-tech]} {Re:} {RFC:} patch to allow lock-free traversal of lists with insertion"
+,month="October"
+,year="2001"
+,note="Available:
+\url{http://lkml.org/lkml/2001/10/13/105}
+[Viewed August 21, 2004]"
+}
+
+@unpublished{Blanchard02a
+,Author="Anton Blanchard"
+,Title="some RCU dcache and ratcache results"
+,month="March"
+,year="2002"
+,note="Available:
+\url{http://marc.theaimsgroup.com/?l=linux-kernel&m=101637107412972&w=2}
+[Viewed October 18, 2004]"
+}
+
 @Conference{Linder02a
 ,Author="Hanna Linder and Dipankar Sarma and Maneesh Soni"
 ,Title="Scalability of the Directory Entry Cache"
@@ -387,6 +657,10 @@ the Linux kernel.
 ,Month="June"
 ,Year="2002"
 ,pages="289-300"
+,annotation="
+	Measured scalability of Linux 2.4 kernel's directory-entry cache
+	(dcache), and measured some scalability enhancements.
+"
 }

 @Conference{McKenney02a
@@ -400,49 +674,76 @@ Andrea Arcangeli and Andi Kleen and Orran Krieger and Rusty Russell"
 ,note="Available:
 \url{http://www.linux.org.uk/~ajh/ols2002_proceedings.pdf.gz}
 [Viewed June 23, 2004]"
+,annotation="
+	Presented and compared a number of RCU implementations for the
+	Linux kernel.
+"
 }

-@conference{Michael02a
-,author="Maged M. Michael"
-,title="Safe Memory Reclamation for Dynamic Lock-Free Objects Using Atomic
-Reads and Writes"
-,Year="2002"
-,Month="August"
-,booktitle="{Proceedings of the 21\textsuperscript{st} Annual ACM
-Symposium on Principles of Distributed Computing}"
-,pages="21-30"
+@unpublished{Sarma02a
+,Author="Dipankar Sarma"
+,Title="specweb99: dcache scalability results"
+,month="July"
+,year="2002"
+,note="Available:
+\url{http://marc.theaimsgroup.com/?l=linux-kernel&m=102645767914212&w=2}
+[Viewed June 23, 2004]"
 ,annotation="
-	Each thread keeps an array of pointers to items that it is
-	currently referencing.  Sort of an inside-out garbage collection
-	mechanism, but one that requires the accessing code to explicitly
-	state its needs.  Also requires read-side memory barriers on
-	most architectures.
+	Compare fastwalk and RCU for dcache.  RCU won.
 "
 }

-@conference{Michael02b
-,author="Maged M. Michael"
-,title="High Performance Dynamic Lock-Free Hash Tables and List-Based Sets"
-,Year="2002"
-,Month="August"
-,booktitle="{Proceedings of the 14\textsuperscript{th} Annual ACM
-Symposium on Parallel
-Algorithms and Architecture}"
-,pages="73-82"
+@unpublished{Barbieri02
+,Author="Luca Barbieri"
+,Title="Re: {[PATCH]} Initial support for struct {vfs\_cred}"
+,month="August"
+,year="2002"
+,note="Available:
+\url{http://marc.theaimsgroup.com/?l=linux-kernel&m=103082050621241&w=2}
+[Viewed: June 23, 2004]"
 ,annotation="
-	Like the title says...
+	Suggested RCU for vfs\_shared\_cred.
 "
 }

-@InProceedings{HerlihyLM02
-,author={Maurice Herlihy and Victor Luchangco and Mark Moir}
-,title="The Repeat Offender Problem: A Mechanism for Supporting Dynamic-Sized,
-Lock-Free Data Structures"
-,booktitle={Proceedings of 16\textsuperscript{th} International
-Symposium on Distributed Computing}
-,year=2002
+@unpublished{Dickins02a
+,author="Hugh Dickins"
+,title="Use RCU for System-V IPC"
+,year="2002"
+,month="October"
+,note="private communication"
+}
+
+@unpublished{Sarma02b
+,Author="Dipankar Sarma"
+,Title="Some dcache\_rcu benchmark numbers"
 ,month="October"
-,pages="339-353"
+,year="2002"
+,note="Available:
+\url{http://marc.theaimsgroup.com/?l=linux-kernel&m=103462075416638&w=2}
+[Viewed June 23, 2004]"
+,annotation="
+	Performance of dcache RCU on kernbench for 16x NUMA-Q and 1x,
+	2x, and 4x systems.  RCU does no harm, and helps on 16x.
+"
+}
+
+@unpublished{LinusTorvalds2003a
+,Author="Linus Torvalds"
+,Title="Re: {[PATCH]} small fixes in brlock.h"
+,month="March"
+,year="2003"
+,note="Available:
+\url{http://lkml.org/lkml/2003/3/9/205}
+[Viewed March 13, 2006]"
+,annotation="
+	Linus suggests replacing brlock with RCU and/or seqlocks:
+	.
+	'It's entirely possible that the current user could be replaced
+	by RCU and/or seqlocks, and we could get rid of brlocks entirely.'
+	.
+	Steve Hemminger responds by replacing them with RCU.
+"
 }

 @article{Appavoo03a
@@ -457,6 +758,20 @@ B. Rosenburg and M. Stumm and J. Xenidis"
 ,volume="42"
 ,number="1"
 ,pages="60-76"
+,annotation="
+	Use of RCU to enable hot-swapping for autonomic behavior in K42.
+"
+}
+
+@unpublished{Seigh03
+,author="Joseph W. {Seigh II}"
+,title="Read Copy Update"
+,Year="2003"
+,Month="March"
+,note="email correspondence"
+,annotation="
+	Described the relationship of the VM/XA passive serialization to RCU.
+"
 }

 @Conference{Arcangeli03
@@ -470,6 +785,27 @@ Dipankar Sarma"
 ,year="2003"
 ,month="June"
 ,pages="297-310"
+,note="Available:
+\url{http://www.rdrop.com/users/paulmck/RCU/rcu.FREENIX.2003.06.14.pdf}
+[Viewed November 21, 2007]"
+,annotation="
+	Compared updated RCU implementations for the Linux kernel, and
+	described System V IPC use of RCU, including order-of-magnitude
+	performance improvements.
+"
+}
+
+@Conference{Soules03a
+,Author="Craig A. N. Soules and Jonathan Appavoo and Kevin Hui and
+Dilma {Da Silva} and Gregory R. Ganger and Orran Krieger and
+Michael Stumm and Robert W. Wisniewski and Marc Auslander and
+Michal Ostrowski and Bryan Rosenburg and Jimi Xenidis"
+,Title="System Support for Online Reconfiguration"
+,Booktitle="Proceedings of the 2003 USENIX Annual Technical Conference"
+,Publisher="USENIX Association"
+,year="2003"
+,month="June"
+,pages="141-154"
 }

 @article{McKenney03a
@@ -481,6 +817,22 @@ Dipankar Sarma"
 ,volume="1"
 ,number="114"
 ,pages="18-26"
+,note="Available:
+\url{http://www.linuxjournal.com/article/6993}
+[Viewed November 14, 2007]"
+,annotation="
+	Reader-friendly intro to RCU, with the infamous old-man-and-brat
+	cartoon.
+"
+}
+
+@unpublished{Sarma03a
+,Author="Dipankar Sarma"
+,Title="RCU low latency patches"
+,month="December"
+,year="2003"
+,note="Message ID: 20031222180114.GA2248@in.ibm.com"
+,annotation="dipankar/ct.2004.03.27/RCUll.2003.12.22.patch"
 }

 @techreport{Friedberg03a
@@ -489,9 +841,14 @@ Dipankar Sarma"
 ,institution="US Patent and Trademark Office"
 ,address="Washington, DC"
 ,year="2003"
-,number="US Patent 6,662,184 (contributed under GPL)"
+,number="US Patent 6,662,184"
 ,month="December"
 ,pages="112"
+,annotation="
+	Applies RCU to a wildcard-search Patricia tree in order to permit
+	synchronization-free lookup.  RCU is used to retain removed nodes
+	for a grace period before freeing them.
+"
 }

 @article{McKenney04a
@@ -503,6 +860,12 @@ Dipankar Sarma"
 ,volume="1"
 ,number="118"
 ,pages="38-46"
+,note="Available:
+\url{http://www.linuxjournal.com/node/7124}
+[Viewed December 26, 2010]"
+,annotation="
+	Reader-friendly intro to dcache and RCU.
+"
 }

 @Conference{McKenney04b
@@ -514,8 +877,83 @@ Dipankar Sarma"
 ,Address="Adelaide, Australia"
 ,note="Available:
 \url{http://www.linux.org.au/conf/2004/abstracts.html#90}
-\url{http://www.rdrop.com/users/paulmck/rclock/lockperf.2004.01.17a.pdf}
+\url{http://www.rdrop.com/users/paulmck/RCU/lockperf.2004.01.17a.pdf}
 [Viewed June 23, 2004]"
+,annotation="
+	Compares performance of RCU to that of other locking primitives
+	over a number of CPUs (x86, Opteron, Itanium, and PPC).
+"
+}
+
+@unpublished{Sarma04a
+,Author="Dipankar Sarma"
+,Title="{[PATCH]} {RCU} for low latency (experimental)"
+,month="March"
+,year="2004"
+,note="\url{http://marc.theaimsgroup.com/?l=linux-kernel&m=108003746402892&w=2}"
+,annotation="Head of thread: dipankar/2004.03.23/rcu-low-lat.1.patch"
+}
+
+@unpublished{Sarma04b
+,Author="Dipankar Sarma"
+,Title="Re: {[PATCH]} {RCU} for low latency (experimental)"
+,month="March"
+,year="2004"
+,note="\url{http://marc.theaimsgroup.com/?l=linux-kernel&m=108016474829546&w=2}"
+,annotation="dipankar/rcuth.2004.03.24/rcu-throttle.patch"
+}
+
+@unpublished{Spraul04a
+,Author="Manfred Spraul"
+,Title="[RFC] 0/5 rcu lock update"
+,month="May"
+,year="2004"
+,note="Available:
+\url{http://marc.theaimsgroup.com/?l=linux-kernel&m=108546407726602&w=2}
+[Viewed June 23, 2004]"
+,annotation="
+	Hierarchical-bitmap patch for RCU infrastructure.
+"
+}
+
+@unpublished{Steiner04a
+,Author="Jack Steiner"
+,Title="Re: [Lse-tech] [RFC, PATCH] 1/5 rcu lock update:
+Add per-cpu batch counter"
+,month="May"
+,year="2004"
+,note="Available:
+\url{http://marc.theaimsgroup.com/?l=linux-kernel&m=108551764515332&w=2}
+[Viewed June 23, 2004]"
+,annotation={
+	RCU runs reasonably on a 512-CPU SGI using Manfred Spraul's patches,
+	which may be found at:
+	https://lkml.org/lkml/2004/5/20/49 (split vars into cachelines)
+	https://lkml.org/lkml/2004/5/22/114 (cpu_quiet() patch)
+	https://lkml.org/lkml/2004/5/25/24 (0/5)
+	https://lkml.org/lkml/2004/5/25/23 (1/5)
+	https://lkml.org/lkml/2004/5/25/265 (works for Jack)
+	https://lkml.org/lkml/2004/5/25/20 (2/5)
+	https://lkml.org/lkml/2004/5/25/22 (3/5)
+	https://lkml.org/lkml/2004/5/25/19 (4/5)
+	https://lkml.org/lkml/2004/5/25/21 (5/5)
+}
+}
+
+@Conference{Sarma04c
+,Author="Dipankar Sarma and Paul E. McKenney"
+,Title="Making {RCU} Safe for Deep Sub-Millisecond Response
+Realtime Applications"
+,Booktitle="Proceedings of the 2004 USENIX Annual Technical Conference
+(FREENIX Track)"
+,Publisher="USENIX Association"
+,year="2004"
+,month="June"
+,pages="182-191"
+,annotation="
+	Describes and compares a number of modifications to the Linux RCU
+	implementation that make it friendly to realtime applications.
+"
 }

 @phdthesis{PaulEdwardMcKenneyPhD
@@ -529,17 +967,118 @@ Oregon Health and Sciences University"
 ,note="Available:
 \url{http://www.rdrop.com/users/paulmck/RCU/RCUdissertation.2004.07.14e1.pdf}
 [Viewed October 15, 2004]"
+,annotation="
+	Describes RCU implementations and presents design patterns
+	corresponding to common uses of RCU in several operating-system
+	kernels.
+"
 }

-@Conference{Sarma04c
-,Author="Dipankar Sarma and Paul E. McKenney"
-,Title="Making RCU Safe for Deep Sub-Millisecond Response Realtime Applications"
-,Booktitle="Proceedings of the 2004 USENIX Annual Technical Conference
-(FREENIX Track)"
-,Publisher="USENIX Association"
+@unpublished{PaulEMcKenney2004rcu:dereference
+,Author="Dipankar Sarma"
+,Title="{Re: RCU : Abstracted RCU dereferencing [5/5]}"
+,month="August"
 ,year="2004"
-,month="June"
-,pages="182-191"
+,note="Available:
+\url{http://lkml.org/lkml/2004/8/6/237}
+[Viewed June 8, 2010]"
+,annotation="
+	Introduce rcu_dereference().
+"
+}
+
+@unpublished{JimHouston04a
+,Author="Jim Houston"
+,Title="{[RFC\&PATCH] Alternative {RCU} implementation}"
+,month="August"
+,year="2004"
+,note="Available:
+\url{http://lkml.org/lkml/2004/8/30/87}
+[Viewed February 17, 2005]"
+,annotation="
+	Uses active code in rcu_read_lock() and rcu_read_unlock() to
+	make RCU happen, allowing RCU to function on CPUs that do not
+	receive a scheduling-clock interrupt.
+"
+}
+
+@unpublished{TomHart04a
+,Author="Thomas E. Hart"
+,Title="Master's Thesis: Applying Lock-free Techniques to the {Linux} Kernel"
+,month="October"
+,year="2004"
+,note="Available:
+\url{http://www.cs.toronto.edu/~tomhart/masters_thesis.html}
+[Viewed October 15, 2004]"
+,annotation="
+	Proposes comparing RCU to lock-free methods for the Linux kernel.
+"
+}
+
+@unpublished{Vaddagiri04a
+,Author="Srivatsa Vaddagiri"
+,Title="Subject: [RFC] Use RCU for tcp\_ehash lookup"
+,month="October"
+,year="2004"
+,note="Available:
+\url{http://marc.theaimsgroup.com/?t=109395731700004&r=1&w=2}
+[Viewed October 18, 2004]"
+,annotation="
+	Srivatsa's RCU patch for tcp_ehash lookup.
+"
+}
+
+@unpublished{Thirumalai04a
+,Author="Ravikiran Thirumalai"
+,Title="Subject: [patchset] Lockfree fd lookup 0 of 5"
+,month="October"
+,year="2004"
+,note="Available:
+\url{http://marc.theaimsgroup.com/?t=109144217400003&r=1&w=2}
+[Viewed October 18, 2004]"
+,annotation="
+	Ravikiran's lockfree FD patch.
+"
+}
+
+@unpublished{Thirumalai04b
+,Author="Ravikiran Thirumalai"
+,Title="Subject: Re: [patchset] Lockfree fd lookup 0 of 5"
+,month="October"
+,year="2004"
+,note="Available:
+\url{http://marc.theaimsgroup.com/?l=linux-kernel&m=109152521410459&w=2}
+[Viewed October 18, 2004]"
+,annotation="
+	Ravikiran's lockfree FD patch.
+"
+}
+
+@unpublished{PaulEMcKenney2004rcu:assign:pointer
+,Author="Paul E. McKenney"
+,Title="{[PATCH 1/3] RCU: \url{rcu_assign_pointer()} removal of memory barriers}"
+,month="October"
+,year="2004"
+,note="Available:
+\url{http://lkml.org/lkml/2004/10/23/241}
+[Viewed June 8, 2010]"
+,annotation="
+	Introduce rcu_assign_pointer().
+"
+}
+
+@unpublished{JamesMorris04a
+,Author="James Morris"
+,Title="{[PATCH 2/3] SELinux} scalability - convert {AVC} to {RCU}"
+,day="15"
+,month="November"
+,year="2004"
+,note="Available:
+\url{http://marc.theaimsgroup.com/?l=linux-kernel&m=110054979416004&w=2}
+[Viewed December 10, 2004]"
+,annotation="
+	James Morris posts Kaigai Kohei's patch to LKML.
+"
 }

 @unpublished{JamesMorris04b
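
The PaulEMcKenney2004rcu:dereference and PaulEMcKenney2004rcu:assign:pointer
entries added in this hunk introduce RCU's publish/subscribe pointer pair.
A rough userspace analogy in C11 atomics (editorial sketch; the kernel's
actual implementations of these primitives differ):

	#include <stdatomic.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct foo { int a; };

	static _Atomic(struct foo *) gp;

	static void publish_foo(int a)
	{
		struct foo *p = malloc(sizeof(*p));

		p->a = a;	/* initialize first... */
		/* ...then publish; the release store keeps the
		 * initialization ordered before the pointer update,
		 * as rcu_assign_pointer() does. */
		atomic_store_explicit(&gp, p, memory_order_release);
	}

	static int read_foo(void)
	{
		/* memory_order_consume is the C11 spelling of the
		 * dependency ordering that rcu_dereference() gives
		 * readers when they chase the pointer. */
		struct foo *p = atomic_load_explicit(&gp,
						     memory_order_consume);

		return p ? p->a : -1;
	}

	int main(void)
	{
		publish_foo(42);
		printf("%d\n", read_foo());
		return 0;
	}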
@@ -550,6 +1089,85 @@ Oregon Health and Sciences University"
 ,note="Available:
 \url{http://www.livejournal.com/users/james_morris/2153.html}
 [Viewed December 10, 2004]"
+,annotation="
+	RCU helps SELinux performance.  ;-)  Made LWN.
+"
+}
+
+@unpublished{PaulMcKenney2005RCUSemantics
+,Author="Paul E. McKenney and Jonathan Walpole"
+,Title="{RCU} Semantics: A First Attempt"
+,month="January"
+,year="2005"
+,day="30"
+,note="Available:
+\url{http://www.rdrop.com/users/paulmck/RCU/rcu-semantics.2005.01.30a.pdf}
+[Viewed December 6, 2009]"
+,annotation="
+	Early derivation of RCU semantics.
+"
+}
+
+@unpublished{PaulMcKenney2005e
+,Author="Paul E. McKenney"
+,Title="Real-Time Preemption and {RCU}"
+,month="March"
+,year="2005"
+,day="17"
+,note="Available:
+\url{http://lkml.org/lkml/2005/3/17/199}
+[Viewed September 5, 2005]"
+,annotation="
+	First posting showing how RCU can be safely adapted for
+	preemptable RCU read side critical sections.
+"
+}
+
+@unpublished{EsbenNeilsen2005a
+,Author="Esben Neilsen"
+,Title="Re: Real-Time Preemption and {RCU}"
+,month="March"
+,year="2005"
+,day="18"
+,note="Available:
+\url{http://lkml.org/lkml/2005/3/18/122}
+[Viewed March 30, 2006]"
+,annotation="
+	Esben Neilsen suggests read-side suppression of grace-period
+	processing for crude-but-workable realtime RCU.  The downside
+	is indefinite grace periods...  But this is OK for experimentation
+	and testing.
+"
+}
+
+@unpublished{TomHart05a
+,Author="Thomas E. Hart and Paul E. McKenney and Angela Demke Brown"
+,Title="Efficient Memory Reclamation is Necessary for Fast Lock-Free
+Data Structures"
+,month="March"
+,year="2005"
+,note="Available:
+\url{ftp://ftp.cs.toronto.edu/csrg-technical-reports/515/}
+[Viewed March 4, 2005]"
+,annotation="
+	Comparison of RCU, QBSR, and EBSR.  RCU wins for read-mostly
+	workloads.  ;-)
+"
+}
+
+@unpublished{JonCorbet2005DeprecateSyncKernel
+,Author="Jonathan Corbet"
+,Title="API change: synchronize_kernel() deprecated"
+,month="May"
+,day="3"
+,year="2005"
+,note="Available:
+\url{http://lwn.net/Articles/134484/}
+[Viewed May 3, 2005]"
+,annotation="
+	Jon Corbet describes deprecation of synchronize_kernel()
+	in favor of synchronize_rcu() and synchronize_sched().
+"
 }

 @unpublished{PaulMcKenney05a
@@ -568,7 +1186,7 @@ Oregon Health and Sciences University"

 @conference{PaulMcKenney05b
 ,Author="Paul E. McKenney and Dipankar Sarma"
-,Title="Towards Hard Realtime Response from the Linux Kernel on SMP Hardware"
+,Title="Towards Hard Realtime Response from the {Linux} Kernel on {SMP} Hardware"
 ,Booktitle="linux.conf.au 2005"
 ,month="April"
 ,year="2005"
@@ -578,6 +1196,103 @@ Oregon Health and Sciences University"
 [Viewed May 13, 2005]"
 ,annotation="
 	Realtime turns into making RCU yet more realtime friendly.
+	http://lca2005.linux.org.au/Papers/Paul%20McKenney/Towards%20Hard%20Realtime%20Response%20from%20the%20Linux%20Kernel/LKS.2005.04.22a.pdf
+"
+}
+
+@unpublished{PaulEMcKenneyHomePage
+,Author="Paul E. McKenney"
+,Title="{Paul} {E.} {McKenney}"
+,month="May"
+,year="2005"
+,note="Available:
+\url{http://www.rdrop.com/users/paulmck/}
+[Viewed May 25, 2005]"
+,annotation="
+	Paul McKenney's home page.
+"
+}
+
+@unpublished{PaulEMcKenneyRCUPage
+,Author="Paul E. McKenney"
+,Title="Read-Copy Update {(RCU)}"
+,month="May"
+,year="2005"
+,note="Available:
+\url{http://www.rdrop.com/users/paulmck/RCU}
+[Viewed May 25, 2005]"
+,annotation="
+	Paul McKenney's RCU page.
+"
+}
+
+@unpublished{JosephSeigh2005a
+,Author="Joseph Seigh"
+,Title="{RCU}+{SMR} (hazard pointers)"
+,month="July"
+,year="2005"
+,note="Personal communication"
+,annotation="
+	Joe Seigh announcing his atomic-ptr-plus project.
+	http://sourceforge.net/projects/atomic-ptr-plus/
+"
+}
+
+@unpublished{JosephSeigh2005b
+,Author="Joseph Seigh"
+,Title="Lock-free synchronization primitives"
+,month="July"
+,day="6"
+,year="2005"
+,note="Available:
+\url{http://sourceforge.net/projects/atomic-ptr-plus/}
+[Viewed August 8, 2005]"
+,annotation="
+	Joe Seigh's atomic-ptr-plus project.
+"
+}
+
+@unpublished{PaulMcKenney2005c
+,Author="Paul E. McKenney"
+,Title="{[RFC,PATCH] RCU} and {CONFIG\_PREEMPT\_RT} sane patch"
+,month="August"
+,day="1"
+,year="2005"
+,note="Available:
+\url{http://lkml.org/lkml/2005/8/1/155}
+[Viewed March 14, 2006]"
+,annotation="
+	First operating counter-based realtime RCU patch posted to LKML.
+"
+}
+
+@unpublished{PaulMcKenney2005d
+,Author="Paul E. McKenney"
+,Title="Re: [Fwd: Re: [patch] Real-Time Preemption, -RT-2.6.13-rc4-V0.7.52-01]"
+,month="August"
+,day="8"
+,year="2005"
+,note="Available:
+\url{http://lkml.org/lkml/2005/8/8/108}
+[Viewed March 14, 2006]"
+,annotation="
+	First operating counter-based realtime RCU patch posted to LKML,
+	but fixed so that various unusual combinations of configuration
+	parameters all function properly.
+"
+}
+
+@unpublished{PaulMcKenney2005rcutorture
+,Author="Paul E. McKenney"
+,Title="{[PATCH]} {RCU} torture testing"
+,month="October"
+,day="1"
+,year="2005"
+,note="Available:
+\url{http://lkml.org/lkml/2005/10/1/70}
+[Viewed March 14, 2006]"
+,annotation="
+	First rcutorture patch.
 "
 }

@@ -591,22 +1306,39 @@ Distributed Processing Symposium"
 ,year="2006"
 ,day="25-29"
 ,address="Rhodes, Greece"
+,note="Available:
+\url{http://www.rdrop.com/users/paulmck/RCU/hart_ipdps06.pdf}
+[Viewed April 28, 2008]"
+,annotation="
+	Compares QSBR, HPBR, EBR, and lock-free reference counting.
+	http://www.cs.toronto.edu/~tomhart/perflab/ipdps06.tgz
+"
+}
+
+@unpublished{NickPiggin2006radixtree
+,Author="Nick Piggin"
+,Title="[patch 3/3] radix-tree: {RCU} lockless readside"
+,month="June"
+,day="20"
+,year="2006"
+,note="Available:
+\url{http://lkml.org/lkml/2006/6/20/238}
+[Viewed March 25, 2008]"
 ,annotation="
-	Compares QSBR (AKA "classic RCU"), HPBR, EBR, and lock-free
-	reference counting.
+	RCU-protected radix tree.
 "
 }

 @Conference{PaulEMcKenney2006b
 ,Author="Paul E. McKenney and Dipankar Sarma and Ingo Molnar and
 Suparna Bhattacharya"
-,Title="Extending RCU for Realtime and Embedded Workloads"
+,Title="Extending {RCU} for Realtime and Embedded Workloads"
 ,Booktitle="{Ottawa Linux Symposium}"
 ,Month="July"
 ,Year="2006"
 ,pages="v2 123-138"
 ,note="Available:
-\url{http://www.linuxsymposium.org/2006/index_2006.php}
+\url{http://www.linuxsymposium.org/2006/view_abstract.php?content_key=184}
 \url{http://www.rdrop.com/users/paulmck/RCU/OLSrtRCU.2006.08.11a.pdf}
 [Viewed January 1, 2007]"
 ,annotation="
@@ -614,6 +1346,37 @@ Suparna Bhattacharya"
614" 1346"
615} 1347}
616 1348
1349@unpublished{WikipediaRCU
1350,Author="Paul E. McKenney and Chris Purcell and Algae and Ben Schumin and
1351Gaius Cornelius and Qwertyus and Neil Conway and Sbw and Blainster and
1352Canis Rufus and Zoicon5 and Anome and Hal Eisen"
1353,Title="Read-Copy Update"
1354,month="July"
1355,day="8"
1356,year="2006"
1357,note="Available:
1358\url{http://en.wikipedia.org/wiki/Read-copy-update}
1359[Viewed August 21, 2006]"
1360,annotation="
1361 Wikipedia RCU page as of July 8 2006.
1362"
1363}
1364
1365@Conference{NickPiggin2006LocklessPageCache
1366,Author="Nick Piggin"
1367,Title="A Lockless Pagecache in Linux---Introduction, Progress, Performance"
1368,Booktitle="{Ottawa Linux Symposium}"
1369,Month="July"
1370,Year="2006"
1371,pages="v2 249-254"
1372,note="Available:
1373\url{http://www.linuxsymposium.org/2006/view_abstract.php?content_key=184}
1374[Viewed January 11, 2009]"
1375,annotation="
1376 Uses RCU-protected radix tree for a lockless page cache.
1377"
1378}
1379
617@unpublished{PaulEMcKenney2006c 1380@unpublished{PaulEMcKenney2006c
618,Author="Paul E. McKenney" 1381,Author="Paul E. McKenney"
619,Title="Sleepable {RCU}" 1382,Title="Sleepable {RCU}"
@@ -637,29 +1400,301 @@ Revised:
 ,day="18"
 ,year="2006"
 ,note="Available:
-\url{http://www.nada.kth.se/~snilsson/public/papers/trash/trash.pdf}
-[Viewed February 24, 2007]"
+\url{http://www.nada.kth.se/~snilsson/publications/TRASH/trash.pdf}
+[Viewed March 4, 2011]"
 ,annotation="
 	RCU-protected dynamic trie-hash combination.
 "
 }

-@unpublished{ThomasEHart2007a
-,Author="Thomas E. Hart and Paul E. McKenney and Angela Demke Brown and Jonathan Walpole"
-,Title="Performance of memory reclamation for lockless synchronization"
-,journal="J. Parallel Distrib. Comput."
+@unpublished{ChristophHellwig2006RCU2SRCU
+,Author="Christoph Hellwig"
+,Title="Re: {[-mm PATCH 1/4]} {RCU}: split classic rcu"
+,month="September"
+,day="28"
+,year="2006"
+,note="Available:
+\url{http://lkml.org/lkml/2006/9/28/160}
+[Viewed March 27, 2008]"
+}
+
+@unpublished{PaulEMcKenneyRCUusagePage
+,Author="Paul E. McKenney"
+,Title="{RCU} {Linux} Usage"
+,month="October"
+,year="2006"
+,note="Available:
+\url{http://www.rdrop.com/users/paulmck/RCU/linuxusage.html}
+[Viewed January 14, 2007]"
+,annotation="
+	Paul McKenney's RCU page showing graphs plotting Linux-kernel
+	usage of RCU.
+"
+}
+
+@unpublished{PaulEMcKenneyRCUusageRawDataPage
+,Author="Paul E. McKenney"
+,Title="Read-Copy Update {(RCU)} Usage in {Linux} Kernel"
+,month="October"
+,year="2006"
+,note="Available:
+\url{http://www.rdrop.com/users/paulmck/RCU/linuxusage/rculocktab.html}
+[Viewed January 14, 2007]"
+,annotation="
+	Paul McKenney's RCU page showing Linux usage of RCU in tabular
+	form, with links to corresponding cscope databases.
+"
+}
+
+@unpublished{GauthamShenoy2006RCUrwlock
+,Author="Gautham R. Shenoy"
+,Title="[PATCH 4/5] lock\_cpu\_hotplug: Redesign - Lightweight implementation of lock\_cpu\_hotplug"
+,month="October"
+,year="2006"
+,day=26
+,note="Available:
+\url{http://lkml.org/lkml/2006/10/26/73}
+[Viewed January 26, 2009]"
+,annotation="
+	RCU-based reader-writer lock that allows readers to proceed with
+	no memory barriers or atomic instructions in the absence of writers.
+	If writers do show up, readers must of course wait as required by
+	the semantics of reader-writer locking.  This is a recursive
+	lock.
+"
+}
+
+@unpublished{JensAxboe2006SlowSRCU
+,Author="Jens Axboe"
+,Title="Re: [patch] cpufreq: mark \url{cpufreq_tsc()} as
+\url{core_initcall_sync}"
+,month="November"
+,year="2006"
+,day=17
+,note="Available:
+\url{http://lkml.org/lkml/2006/11/17/56}
+[Viewed May 28, 2007]"
+,annotation="
+	SRCU's grace periods are too slow for Jens, even after a
+	factor-of-three speedup.
+	Sped-up version of SRCU at http://lkml.org/lkml/2006/11/17/359.
+"
+}
+
+@unpublished{OlegNesterov2006QRCU
+,Author="Oleg Nesterov"
+,Title="Re: [patch] cpufreq: mark {\tt cpufreq\_tsc()} as
+{\tt core\_initcall\_sync}"
+,month="November"
+,year="2006"
+,day=19
+,note="Available:
+\url{http://lkml.org/lkml/2006/11/19/69}
+[Viewed May 28, 2007]"
+,annotation="
+	First cut of QRCU.  Expanded/corrected versions followed.
+	Used to be OlegNesterov2007QRCU, now time-corrected.
+"
+}
+
+@unpublished{OlegNesterov2006aQRCU
+,Author="Oleg Nesterov"
+,Title="Re: [RFC, PATCH 1/2] qrcu: {"quick"} srcu implementation"
+,month="November"
+,year="2006"
+,day=30
+,note="Available:
+\url{http://lkml.org/lkml/2006/11/29/330}
+[Viewed November 26, 2008]"
+,annotation="
+	Expanded/corrected version of QRCU.
+	Used to be OlegNesterov2007aQRCU, now time-corrected.
+"
+}
+
+@unpublished{EvgeniyPolyakov2006RCUslowdown
+,Author="Evgeniy Polyakov"
+,Title="Badness in postponing work"
+,month="December"
+,year="2006"
+,day=05
+,note="Available:
+\url{http://www.ioremap.net/node/41}
+[Viewed October 28, 2008]"
+,annotation="
+	Using RCU as a pure delay leads to a 2.5x slowdown in skbs in
+	the Linux kernel.
+"
+}
+
+@inproceedings{ChrisMatthews2006ClusteredObjectsRCU
+,author = {Matthews, Chris and Coady, Yvonne and Appavoo, Jonathan}
+,title = {Portability events: a programming model for scalable system infrastructures}
+,booktitle = {PLOS '06: Proceedings of the 3rd workshop on Programming languages and operating systems}
+,year = {2006}
+,isbn = {1-59593-577-0}
+,pages = {11}
+,location = {San Jose, California}
+,doi = {http://doi.acm.org/10.1145/1215995.1216006}
+,publisher = {ACM}
+,address = {New York, NY, USA}
+,annotation={
+	Uses K42's RCU-like functionality to manage clustered-object
+	lifetimes.
+}}
+
+@article{DilmaDaSilva2006K42
+,author = {Silva, Dilma Da and Krieger, Orran and Wisniewski, Robert W. and Waterland, Amos and Tam, David and Baumann, Andrew}
+,title = {K42: an infrastructure for operating system research}
+,journal = {SIGOPS Oper. Syst. Rev.}
+,volume = {40}
+,number = {2}
+,year = {2006}
+,issn = {0163-5980}
+,pages = {34--42}
+,doi = {http://doi.acm.org/10.1145/1131322.1131333}
+,publisher = {ACM}
+,address = {New York, NY, USA}
+,annotation={
+	Describes relationship of K42 generations to RCU.
+}}
+
+# CoreyMinyard2007list_splice_rcu
+@unpublished{CoreyMinyard2007list:splice:rcu
+,Author="Corey Minyard and Paul E. McKenney"
+,Title="{[PATCH]} add an {RCU} version of list splicing"
+,month="January"
+,year="2007"
+,day=3
+,note="Available:
+\url{http://lkml.org/lkml/2007/1/3/112}
+[Viewed May 28, 2007]"
+,annotation="
+	Patch for list_splice_rcu().
+"
+}
+
+@unpublished{PaulEMcKenney2007rcubarrier
+,Author="Paul E. McKenney"
+,Title="{RCU} and Unloadable Modules"
+,month="January"
+,day="14"
+,year="2007"
+,note="Available:
+\url{http://lwn.net/Articles/217484/}
+[Viewed November 22, 2007]"
+,annotation="
+	LWN article introducing the rcu_barrier() primitive.
+"
+}
+
+@unpublished{PeterZijlstra2007SyncBarrier
+,Author="Peter Zijlstra and Ingo Molnar"
+,Title="{[PATCH 3/7]} barrier: a scalable synchonisation barrier"
+,month="January"
+,year="2007"
+,day=28
+,note="Available:
+\url{http://lkml.org/lkml/2007/1/28/34}
+[Viewed March 27, 2008]"
+,annotation="
+	RCU-like implementation for frequent updaters and rare readers(!).
+	Subsumed into QRCU.  Maybe...
+"
+}
+
+@unpublished{PaulEMcKenney2007BoostRCU
+,Author="Paul E. McKenney"
+,Title="Priority-Boosting {RCU} Read-Side Critical Sections"
+,month="February"
+,day="5"
+,year="2007"
+,note="Available:
+\url{http://lwn.net/Articles/220677/}
+Revised:
+\url{http://www.rdrop.com/users/paulmck/RCU/RCUbooststate.2007.04.16a.pdf}
+[Viewed September 7, 2007]"
+,annotation="
+	LWN article introducing RCU priority boosting.
+"
+}
+
+@unpublished{PaulMcKenney2007QRCUpatch
+,Author="Paul E. McKenney"
+,Title="{[PATCH]} {QRCU} with lockless fastpath"
+,month="February"
+,year="2007"
+,day=24
+,note="Available:
+\url{http://lkml.org/lkml/2007/2/25/18}
+[Viewed March 27, 2008]"
+,annotation="
+	Patch for QRCU supplying lock-free fast path.
+"
+}
+
+@article{JonathanAppavoo2007K42RCU
+,author = {Appavoo, Jonathan and Silva, Dilma Da and Krieger, Orran and Auslander, Marc and Ostrowski, Michal and Rosenburg, Bryan and Waterland, Amos and Wisniewski, Robert W. and Xenidis, Jimi and Stumm, Michael and Soares, Livio}
+,title = {Experience distributing objects in an SMMP OS}
+,journal = {ACM Trans. Comput. Syst.}
+,volume = {25}
+,number = {3}
+,year = {2007}
+,issn = {0734-2071}
+,pages = {6/1--6/52}
+,doi = {http://doi.acm.org/10.1145/1275517.1275518}
+,publisher = {ACM}
+,address = {New York, NY, USA}
+,annotation={
+	Role of RCU in K42.
+}}
+
+@conference{RobertOlsson2007Trash
+,Author="Robert Olsson and Stefan Nilsson"
+,Title="{TRASH}: A dynamic {LC}-trie and hash data structure"
+,booktitle="Workshop on High Performance Switching and Routing (HPSR'07)"
+,month="May"
+,year="2007"
+,note="Available:
+\url{http://ieeexplore.ieee.org/xpl/freeabs_all.jsp?arnumber=4281239}
+[Viewed October 1, 2010]"
+,annotation="
+	RCU-protected dynamic trie-hash combination.
+"
+}
+
+@conference{PeterZijlstra2007ConcurrentPagecacheRCU
+,Author="Peter Zijlstra"
+,Title="Concurrent Pagecache"
+,Booktitle="Linux Symposium"
+,month="June"
 ,year="2007"
-,note="To appear in J. Parallel Distrib. Comput.
-\url{doi=10.1016/j.jpdc.2007.04.010}"
+,address="Ottawa, Canada"
+,note="Available:
+\url{http://ols.108.redhat.com/2007/Reprints/zijlstra-Reprint.pdf}
+[Viewed April 14, 2008]"
+,annotation="
+	Page-cache modifications permitting RCU readers and concurrent
+	updates.
+"
+}
+
+@unpublished{PaulEMcKenney2007whatisRCU
+,Author="Paul E. McKenney"
+,Title="What is {RCU}?"
+,year="2007"
+,month="07"
+,note="Available:
+\url{http://www.rdrop.com/users/paulmck/RCU/whatisRCU.html}
+[Viewed July 6, 2007]"
 ,annotation={
-	Compares QSBR (AKA "classic RCU"), HPBR, EBR, and lock-free
-	reference counting.  Journal version of ThomasEHart2006a.
+	Describes RCU in Linux kernel.
 }
 }

 @unpublished{PaulEMcKenney2007QRCUspin
 ,Author="Paul E. McKenney"
-,Title="Using Promela and Spin to verify parallel algorithms"
+,Title="Using {Promela} and {Spin} to verify parallel algorithms"
 ,month="August"
 ,day="1"
 ,year="2007"
@@ -669,6 +1704,50 @@ Revised:
 ,annotation="
 	LWN article describing Promela and spin, and also using Oleg
 	Nesterov's QRCU as an example (with Paul McKenney's fastpath).
+	Merged patch at: http://lkml.org/lkml/2007/2/25/18
+"
+}
+
+@unpublished{PaulEMcKenney2007WG21DDOatomics
+,Author="Paul E. McKenney and Hans-J. Boehm and Lawrence Crowl"
+,Title="C++ Data-Dependency Ordering: Atomics and Memory Model"
+,month="August"
+,day="3"
+,year="2007"
+,note="Preprint:
+\url{http://open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2664.htm}
+[Viewed December 7, 2009]"
+,annotation="
+	RCU for C++, parts 1 and 2.
+"
+}
+
+@unpublished{PaulEMcKenney2007WG21DDOannotation
+,Author="Paul E. McKenney and Lawrence Crowl"
+,Title="C++ Data-Dependency Ordering: Function Annotation"
+,month="September"
+,day="18"
+,year="2008"
+,note="Preprint:
+\url{http://open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2782.htm}
+[Viewed December 7, 2009]"
+,annotation="
+	RCU for C++, part 2, updated many times.
+"
+}
+
+@unpublished{PaulEMcKenney2007PreemptibleRCUPatch
+,Author="Paul E. McKenney"
+,Title="[PATCH RFC 0/9] {RCU}: Preemptible {RCU}"
+,month="September"
+,day="10"
+,year="2007"
+,note="Available:
+\url{http://lkml.org/lkml/2007/9/10/213}
+[Viewed October 25, 2007]"
+,annotation="
+	Final patch for preemptable RCU to -rt.  (Later patches were
+	to mainline, eventually incorporated.)
 "
 }

@@ -686,10 +1765,46 @@ Revised:
686" 1765"
687} 1766}
688 1767
1768@article{ThomasEHart2007a
1769,Author="Thomas E. Hart and Paul E. McKenney and Angela Demke Brown and Jonathan Walpole"
1770,Title="Performance of memory reclamation for lockless synchronization"
1771,journal="J. Parallel Distrib. Comput."
1772,volume={67}
1773,number="12"
1774,year="2007"
1775,issn="0743-7315"
1776,pages="1270--1285"
1777,doi="http://dx.doi.org/10.1016/j.jpdc.2007.04.010"
1778,publisher="Academic Press, Inc."
1779,address="Orlando, FL, USA"
1780,annotation={
1781 Compares QSBR, HPBR, EBR, and lock-free reference counting.
1782 Journal version of ThomasEHart2006a.
1783}
1784}
1785
1786@unpublished{MathieuDesnoyers2007call:rcu:schedNeeded
1787,Author="Mathieu Desnoyers"
1788,Title="Re: [patch 1/2] {Linux} Kernel Markers - Support Multiple Probes"
1789,month="December"
1790,day="20"
1791,year="2007"
1792,note="Available:
1793\url{http://lkml.org/lkml/2007/12/20/244}
1794[Viewed March 27, 2008]"
1795,annotation="
1796 Request for call_rcu_sched() and rcu_barrier_sched().
1797"
1798}
1799
1800
689######################################################################## 1801########################################################################
690# 1802#
691# "What is RCU?" LWN series. 1803# "What is RCU?" LWN series.
692# 1804#
1805# http://lwn.net/Articles/262464/ (What is RCU, Fundamentally?)
1806# http://lwn.net/Articles/263130/ (What is RCU's Usage?)
1807# http://lwn.net/Articles/264090/ (What is RCU's API?)
693 1808
694@unpublished{PaulEMcKenney2007WhatIsRCUFundamentally 1809@unpublished{PaulEMcKenney2007WhatIsRCUFundamentally
695,Author="Paul E. McKenney and Jonathan Walpole" 1810,Author="Paul E. McKenney and Jonathan Walpole"
@@ -723,7 +1838,7 @@ Revised:
723 3. RCU is a Bulk Reference-Counting Mechanism 1838 3. RCU is a Bulk Reference-Counting Mechanism
724 4. RCU is a Poor Man's Garbage Collector 1839 4. RCU is a Poor Man's Garbage Collector
725 5. RCU is a Way of Providing Existence Guarantees 1840 5. RCU is a Way of Providing Existence Guarantees
726 6. RCU is a Way of Waiting for Things to Finish 1841 6. RCU is a Way of Waiting for Things to Finish
727" 1842"
728} 1843}
729 1844
@@ -747,20 +1862,96 @@ Revised:
747# 1862#
748######################################################################## 1863########################################################################
749 1864
1865
1866@unpublished{SteveRostedt2008dyntickRCUpatch
1867,Author="Steven Rostedt and Paul E. McKenney"
1868,Title="{[PATCH]} add support for dynamic ticks and preempt rcu"
1869,month="January"
1870,day="29"
1871,year="2008"
1872,note="Available:
1873\url{http://lkml.org/lkml/2008/1/29/208}
1874[Viewed March 27, 2008]"
1875,annotation="
1876 Patch that prevents preemptible RCU from unnecessarily waking
1877 up dynticks-idle CPUs.
1878"
1879}
1880
1881@unpublished{PaulEMcKenney2008LKMLDependencyOrdering
1882,Author="Paul E. McKenney"
1883,Title="Re: [PATCH 02/22 -v7] Add basic support for gcc profiler instrumentation"
1884,month="February"
1885,day="1"
1886,year="2008"
1887,note="Available:
1888\url{http://lkml.org/lkml/2008/2/2/255}
1889[Viewed October 18, 2008]"
1890,annotation="
1891 Explanation of compilers violating dependency ordering.
1892"
1893}
1894
1895@Conference{PaulEMcKenney2008Beijing
1896,Author="Paul E. McKenney"
1897,Title="Introducing Technology Into {Linux} Or:
1898Introducing your technology Into {Linux} will require introducing a
1899lot of {Linux} into your technology!!!"
1900,Booktitle="2008 Linux Developer Symposium - China"
1901,Publisher="OSS China"
1902,Month="February"
1903,Year="2008"
1904,Address="Beijing, China"
1905,note="Available:
1906\url{http://www.rdrop.com/users/paulmck/RCU/TechIntroLinux.2008.02.19a.pdf}
1907[Viewed August 12, 2008]"
1908}
1909
1910@unpublished{PaulEMcKenney2008dynticksRCU
1911,Author="Paul E. McKenney and Steven Rostedt"
1912,Title="Integrating and Validating dynticks and Preemptable RCU"
1913,month="April"
1914,day="24"
1915,year="2008"
1916,note="Available:
1917\url{http://lwn.net/Articles/279077/}
1918[Viewed April 24, 2008]"
1919,annotation="
1920 Describes use of Promela and Spin to validate (and fix!) the
1921 dynticks/RCU interface.
1922"
1923}
1924
750@article{DinakarGuniguntala2008IBMSysJ 1925@article{DinakarGuniguntala2008IBMSysJ
751,author="D. Guniguntala and P. E. McKenney and J. Triplett and J. Walpole" 1926,author="D. Guniguntala and P. E. McKenney and J. Triplett and J. Walpole"
752,title="The read-copy-update mechanism for supporting real-time applications on shared-memory multiprocessor systems with {Linux}" 1927,title="The read-copy-update mechanism for supporting real-time applications on shared-memory multiprocessor systems with {Linux}"
753,Year="2008" 1928,Year="2008"
754,Month="April" 1929,Month="April-June"
755,journal="IBM Systems Journal" 1930,journal="IBM Systems Journal"
756,volume="47" 1931,volume="47"
757,number="2" 1932,number="2"
758,pages="@@-@@" 1933,pages="221-236"
759,annotation=" 1934,annotation="
760 RCU, realtime RCU, sleepable RCU, performance. 1935 RCU, realtime RCU, sleepable RCU, performance.
761" 1936"
762} 1937}
763 1938
1939@unpublished{LaiJiangshan2008NewClassicAlgorithm
1940,Author="Lai Jiangshan"
1941,Title="[{RFC}][{PATCH}] rcu classic: new algorithm for callbacks-processing"
1942,month="June"
1943,day="3"
1944,year="2008"
1945,note="Available:
1946\url{http://lkml.org/lkml/2008/6/2/539}
1947[Viewed December 10, 2008]"
1948,annotation="
1949 Updated RCU classic algorithm. Introduced multi-tailed list
 1950	for RCU callbacks and also pulled common code into
1951 __call_rcu().
1952"
1953}
1954
764@article{PaulEMcKenney2008RCUOSR 1955@article{PaulEMcKenney2008RCUOSR
765,author="Paul E. McKenney and Jonathan Walpole" 1956,author="Paul E. McKenney and Jonathan Walpole"
766,title="Introducing technology into the {Linux} kernel: a case study" 1957,title="Introducing technology into the {Linux} kernel: a case study"
@@ -778,6 +1969,52 @@ Revised:
778} 1969}
779} 1970}
780 1971
1972@unpublished{ManfredSpraul2008StateMachineRCU
1973,Author="Manfred Spraul"
1974,Title="[{RFC}, {PATCH}] state machine based rcu"
1975,month="August"
1976,day="21"
1977,year="2008"
1978,note="Available:
1979\url{http://lkml.org/lkml/2008/8/21/336}
1980[Viewed December 8, 2008]"
1981,annotation="
1982 State-based RCU. One key thing that this patch does is to
1983 separate the dynticks handling of NMIs and IRQs.
1984"
1985}
1986
1987@unpublished{ManfredSpraul2008dyntickIRQNMI
1988,Author="Manfred Spraul"
1989,Title="Re: [{RFC}, {PATCH}] v4 scalable classic {RCU} implementation"
1990,month="September"
1991,day="6"
1992,year="2008"
1993,note="Available:
1994\url{http://lkml.org/lkml/2008/9/6/86}
1995[Viewed December 8, 2008]"
1996,annotation="
1997 Manfred notes a fix required to my attempt to separate irq
1998 and NMI processing for hierarchical RCU's dynticks interface.
1999"
2000}
2001
2002@techreport{PaulEMcKenney2008cyclicRCU
2003,author="Paul E. McKenney"
2004,title="Efficient Support of Consistent Cyclic Search With Read-Copy Update"
2005,institution="US Patent and Trademark Office"
2006,address="Washington, DC"
2007,year="2008"
2008,number="US Patent 7,426,511"
2009,month="September"
2010,pages="23"
2011,annotation="
2012 Maintains an additional level of indirection to allow
2013 readers to confine themselves to the desired snapshot of the
2014 data structure. Only permits one update at a time.
2015"
2016}
2017
781@unpublished{PaulEMcKenney2008HierarchicalRCU 2018@unpublished{PaulEMcKenney2008HierarchicalRCU
782,Author="Paul E. McKenney" 2019,Author="Paul E. McKenney"
783,Title="Hierarchical {RCU}" 2020,Title="Hierarchical {RCU}"
@@ -793,6 +2030,21 @@ Revised:
793" 2030"
794} 2031}
795 2032
2033@unpublished{PaulEMcKenney2009BloatwatchRCU
2034,Author="Paul E. McKenney"
2035,Title="Re: [PATCH fyi] RCU: the bloatwatch edition"
2036,month="January"
2037,day="14"
2038,year="2009"
2039,note="Available:
2040\url{http://lkml.org/lkml/2009/1/14/449}
2041[Viewed January 15, 2009]"
2042,annotation="
2043 Small-footprint implementation of RCU for uniprocessor
2044 embedded applications -- and also for exposition purposes.
2045"
2046}
2047
796@conference{PaulEMcKenney2009MaliciousURCU 2048@conference{PaulEMcKenney2009MaliciousURCU
797,Author="Paul E. McKenney" 2049,Author="Paul E. McKenney"
798,Title="Using a Malicious User-Level {RCU} to Torture {RCU}-Based Algorithms" 2050,Title="Using a Malicious User-Level {RCU} to Torture {RCU}-Based Algorithms"
@@ -816,15 +2068,17 @@ Revised:
816,year="2009" 2068,year="2009"
817,note="Available: 2069,note="Available:
818\url{http://lkml.org/lkml/2009/2/5/572} 2070\url{http://lkml.org/lkml/2009/2/5/572}
819\url{git://lttng.org/userspace-rcu.git} 2071\url{http://lttng.org/urcu}
820[Viewed February 20, 2009]" 2072[Viewed February 20, 2009]"
821,annotation=" 2073,annotation="
822 Mathieu Desnoyers's user-space RCU implementation. 2074 Mathieu Desnoyers's user-space RCU implementation.
823 git://lttng.org/userspace-rcu.git 2075 git://lttng.org/userspace-rcu.git
2076 http://lttng.org/cgi-bin/gitweb.cgi?p=userspace-rcu.git
2077 http://lttng.org/urcu
824" 2078"
825} 2079}
826 2080
827@unpublished{PaulEMcKenney2009BloatWatchRCU 2081@unpublished{PaulEMcKenney2009LWNBloatWatchRCU
828,Author="Paul E. McKenney" 2082,Author="Paul E. McKenney"
829,Title="{RCU}: The {Bloatwatch} Edition" 2083,Title="{RCU}: The {Bloatwatch} Edition"
830,month="March" 2084,month="March"
@@ -852,14 +2106,29 @@ Revised:
852" 2106"
853} 2107}
854 2108
855@unpublished{JoshTriplett2009RPHash 2109@unpublished{PaulEMcKenney2009fastRTRCU
2110,Author="Paul E. McKenney"
2111,Title="[{PATCH} {RFC} -tip 0/4] {RCU} cleanups and simplified preemptable {RCU}"
2112,month="July"
2113,day="23"
2114,year="2009"
2115,note="Available:
2116\url{http://lkml.org/lkml/2009/7/23/294}
2117[Viewed August 15, 2009]"
2118,annotation="
2119 First posting of simple and fast preemptable RCU.
2120"
2121}
2122
2123@InProceedings{JoshTriplett2009RPHash
856,Author="Josh Triplett" 2124,Author="Josh Triplett"
857,Title="Scalable concurrent hash tables via relativistic programming" 2125,Title="Scalable concurrent hash tables via relativistic programming"
858,month="September" 2126,month="September"
859,year="2009" 2127,year="2009"
860,note="Linux Plumbers Conference presentation" 2128,booktitle="Linux Plumbers Conference 2009"
861,annotation=" 2129,annotation="
862 RP fun with hash tables. 2130 RP fun with hash tables.
2131 See also JoshTriplett2010RPHash
863" 2132"
864} 2133}
865 2134
@@ -872,4 +2141,323 @@ Revised:
872,note="Available: 2141,note="Available:
873\url{http://www.lttng.org/pub/thesis/desnoyers-dissertation-2009-12.pdf} 2142\url{http://www.lttng.org/pub/thesis/desnoyers-dissertation-2009-12.pdf}
874[Viewed December 9, 2009]" 2143[Viewed December 9, 2009]"
2144,annotation={
2145 Chapter 6 (page 97) covers user-level RCU.
2146}
2147}
2148
2149@unpublished{RelativisticProgrammingWiki
2150,Author="Josh Triplett and Paul E. McKenney and Jonathan Walpole"
2151,Title="Relativistic Programming"
2152,month="September"
2153,year="2009"
2154,note="Available:
2155\url{http://wiki.cs.pdx.edu/rp/}
2156[Viewed December 9, 2009]"
2157,annotation="
2158 Main Relativistic Programming Wiki.
2159"
2160}
2161
2162@conference{PaulEMcKenney2009DeterministicRCU
2163,Author="Paul E. McKenney"
2164,Title="Deterministic Synchronization in Multicore Systems: the Role of {RCU}"
2165,Booktitle="Eleventh Real Time Linux Workshop"
2166,month="September"
2167,year="2009"
2168,address="Dresden, Germany"
2169,note="Available:
2170\url{http://www.rdrop.com/users/paulmck/realtime/paper/DetSyncRCU.2009.08.18a.pdf}
2171[Viewed January 14, 2009]"
2172}
2173
2174@unpublished{PaulEMcKenney2009HuntingHeisenbugs
2175,Author="Paul E. McKenney"
2176,Title="Hunting Heisenbugs"
2177,month="November"
2178,year="2009"
2179,day="1"
2180,note="Available:
2181\url{http://paulmck.livejournal.com/14639.html}
2182[Viewed June 4, 2010]"
2183,annotation="
2184 Day-one bug in Tree RCU that took forever to track down.
2185"
2186}
2187
2188@unpublished{MathieuDesnoyers2009defer:rcu
2189,Author="Mathieu Desnoyers"
2190,Title="Kernel RCU: shrink the size of the struct rcu\_head"
2191,month="December"
2192,year="2009"
2193,note="Available:
2194\url{http://lkml.org/lkml/2009/10/18/129}
2195[Viewed December 29, 2009]"
2196,annotation="
2197 Mathieu proposed defer_rcu() with fixed-size per-thread pool
2198 of RCU callbacks.
2199"
2200}
2201
2202@unpublished{MathieuDesnoyers2009VerifPrePub
2203,Author="Mathieu Desnoyers and Paul E. McKenney and Michel R. Dagenais"
2204,Title="Multi-Core Systems Modeling for Formal Verification of Parallel Algorithms"
2205,month="December"
2206,year="2009"
2207,note="Submitted to IEEE TPDS"
2208,annotation="
2209 OOMem model for Mathieu's user-level RCU mechanical proof of
2210 correctness.
2211"
2212}
2213
2214@unpublished{MathieuDesnoyers2009URCUPrePub
2215,Author="Mathieu Desnoyers and Paul E. McKenney and Alan Stern and Michel R. Dagenais and Jonathan Walpole"
2216,Title="User-Level Implementations of Read-Copy Update"
2217,month="December"
2218,year="2010"
2219,url=\url{http://www.computer.org/csdl/trans/td/2012/02/ttd2012020375-abs.html}
2220,annotation="
2221 RCU overview, desiderata, semi-formal semantics, user-level RCU
2222 usage scenarios, three classes of RCU implementation, wait-free
2223 RCU updates, RCU grace-period batching, update overhead,
2224 http://www.rdrop.com/users/paulmck/RCU/urcu-main-accepted.2011.08.30a.pdf
2225 http://www.rdrop.com/users/paulmck/RCU/urcu-supp-accepted.2011.08.30a.pdf
2226 Superseded by MathieuDesnoyers2012URCU.
2227"
2228}
2229
2230@inproceedings{HariKannan2009DynamicAnalysisRCU
2231,author = {Kannan, Hari}
2232,title = {Ordering decoupled metadata accesses in multiprocessors}
2233,booktitle = {MICRO 42: Proceedings of the 42nd Annual IEEE/ACM International Symposium on Microarchitecture}
2234,year = {2009}
2235,isbn = {978-1-60558-798-1}
2236,pages = {381--390}
2237,location = {New York, New York}
2238,doi = {http://doi.acm.org/10.1145/1669112.1669161}
2239,publisher = {ACM}
2240,address = {New York, NY, USA}
2241,annotation={
2242 Uses RCU to protect metadata used in dynamic analysis.
2243}}
2244
2245@conference{PaulEMcKenney2010SimpleOptRCU
2246,Author="Paul E. McKenney"
2247,Title="Simplicity Through Optimization"
2248,Booktitle="linux.conf.au 2010"
2249,month="January"
2250,year="2010"
2251,address="Wellington, New Zealand"
2252,note="Available:
2253\url{http://www.rdrop.com/users/paulmck/RCU/SimplicityThruOptimization.2010.01.21f.pdf}
2254[Viewed October 10, 2010]"
2255,annotation="
2256 TREE_PREEMPT_RCU optimizations greatly simplified the old
2257 PREEMPT_RCU implementation.
2258"
2259}
2260
2261@unpublished{PaulEMcKenney2010LockdepRCU
2262,Author="Paul E. McKenney"
2263,Title="Lockdep-{RCU}"
2264,month="February"
2265,year="2010"
2266,day="1"
2267,note="Available:
2268\url{https://lwn.net/Articles/371986/}
2269[Viewed June 4, 2010]"
2270,annotation="
2271 CONFIG_PROVE_RCU, or at least an early version.
2272"
2273}
2274
2275@unpublished{AviKivity2010KVM2RCU
2276,Author="Avi Kivity"
2277,Title="[{PATCH} 37/40] {KVM}: Bump maximum vcpu count to 64"
2278,month="February"
2279,year="2010"
2280,note="Available:
2281\url{http://www.mail-archive.com/kvm@vger.kernel.org/msg28640.html}
2282[Viewed March 20, 2010]"
2283,annotation="
2284 Use of RCU permits KVM to increase the size of guest OSes from
2285 16 CPUs to 64 CPUs.
2286"
2287}
2288
2289@unpublished{HerbertXu2010RCUResizeHash
2290,Author="Herbert Xu"
2291,Title="bridge: Add core IGMP snooping support"
2292,month="February"
2293,year="2010"
2294,note="Available:
2295\url{http://kerneltrap.com/mailarchive/linux-netdev/2010/2/26/6270589}
2296[Viewed March 20, 2011]"
2297,annotation={
2298 Use a pair of list_head structures to support RCU-protected
2299 resizable hash tables.
2300}}
2301
2302@article{JoshTriplett2010RPHash
2303,author="Josh Triplett and Paul E. McKenney and Jonathan Walpole"
2304,title="Scalable Concurrent Hash Tables via Relativistic Programming"
2305,journal="ACM Operating Systems Review"
2306,year=2010
2307,volume=44
2308,number=3
2309,month="July"
2310,annotation={
2311 RP fun with hash tables.
2312 http://portal.acm.org/citation.cfm?id=1842733.1842750
2313}}
2314
2315@unpublished{PaulEMcKenney2010RCUAPI
2316,Author="Paul E. McKenney"
2317,Title="The {RCU} {API}, 2010 Edition"
2318,month="December"
2319,day="8"
2320,year="2010"
2321,note="Available:
2322\url{http://lwn.net/Articles/418853/}
2323[Viewed December 8, 2010]"
2324,annotation="
2325 Includes updated software-engineering features.
2326"
2327}
2328
2329@mastersthesis{AndrejPodzimek2010masters
2330,author="Andrej Podzimek"
2331,title="Read-Copy-Update for OpenSolaris"
2332,school="Charles University in Prague"
2333,year="2010"
2334,note="Available:
2335\url{https://andrej.podzimek.org/thesis.pdf}
2336[Viewed January 31, 2011]"
2337,annotation={
2338 Reviews RCU implementations and creates a few for OpenSolaris.
2339 Drives quiescent-state detection from RCU read-side primitives,
2340 in a manner roughly similar to that of Jim Houston.
2341}}
2342
2343@unpublished{LinusTorvalds2011Linux2:6:38:rc1:NPigginVFS
2344,Author="Linus Torvalds"
2345,Title="Linux 2.6.38-rc1"
2346,month="January"
2347,year="2011"
2348,note="Available:
2349\url{https://lkml.org/lkml/2011/1/18/322}
2350[Viewed March 4, 2011]"
2351,annotation={
2352 "The RCU-based name lookup is at the other end of the spectrum - the
2353 absolute anti-gimmick. It's some seriously good stuff, and gets rid of
2354 the last main global lock that really tends to hurt some kernel loads.
2355 The dentry lock is no longer a big serializing issue. What's really
2356 nice about it is that it actually improves performance a lot even for
2357 single-threaded loads (on an SMP kernel), because it gets rid of some
2358 of the most expensive parts of path component lookup, which was the
2359 d_lock on every component lookup. So I'm seeing improvements of 30-50%
2360 on some seriously pathname-lookup intensive loads."
2361}}
2362
2363@techreport{JoshTriplett2011RPScalableCorrectOrdering
2364,author = {Josh Triplett and Philip W. Howard and Paul E. McKenney and Jonathan Walpole}
2365,title = {Scalable Correct Memory Ordering via Relativistic Programming}
2366,year = {2011}
2367,number = {11-03}
2368,institution = {Portland State University}
2369,note = {\url{http://www.cs.pdx.edu/pdfs/tr1103.pdf}}
2370}
2371
2372@inproceedings{PhilHoward2011RCUTMRBTree
2373,author = {Philip W. Howard and Jonathan Walpole}
2374,title = {A Relativistic Enhancement to Software Transactional Memory}
2375,booktitle = {Proceedings of the 3rd USENIX conference on Hot topics in parallelism}
2376,series = {HotPar'11}
2377,year = {2011}
2378,location = {Berkeley, CA}
2379,pages = {1--6}
2380,numpages = {6}
2381,url = {http://www.usenix.org/event/hotpar11/tech/final_files/Howard.pdf}
2382,publisher = {USENIX Association}
2383,address = {Berkeley, CA, USA}
2384}
2385
2386@techreport{PaulEMcKenney2011cyclicparallelRCU
2387,author="Paul E. McKenney and Jonathan Walpole"
2388,title="Efficient Support of Consistent Cyclic Search With Read-Copy Update and Parallel Updates"
2389,institution="US Patent and Trademark Office"
2390,address="Washington, DC"
2391,year="2011"
2392,number="US Patent 7,953,778"
2393,month="May"
2394,pages="34"
2395,annotation="
2396 Maintains an array of generation numbers to track in-flight
2397 updates and keeps an additional level of indirection to allow
2398 readers to confine themselves to the desired snapshot of the
2399 data structure.
2400"
2401}
2402
2403@inproceedings{Triplett:2011:RPHash
2404,author = {Triplett, Josh and McKenney, Paul E. and Walpole, Jonathan}
2405,title = {Resizable, Scalable, Concurrent Hash Tables via Relativistic Programming}
2406,booktitle = {Proceedings of the 2011 USENIX Annual Technical Conference}
2407,month = {June}
2408,year = {2011}
2409,pages = {145--158}
2410,numpages = {14}
2411,url={http://www.usenix.org/event/atc11/tech/final_files/atc11_proceedings.pdf}
2412,publisher = {The USENIX Association}
2413,address = {Portland, OR USA}
2414}
2415
2416@unpublished{PaulEMcKenney2011RCU3.0trainwreck
2417,Author="Paul E. McKenney"
2418,Title="3.0 and {RCU:} what went wrong"
2419,month="July"
2420,day="27"
2421,year="2011"
2422,note="Available:
2423\url{http://lwn.net/Articles/453002/}
2424[Viewed July 27, 2011]"
2425,annotation="
2426 Analysis of the RCU trainwreck in Linux kernel 3.0.
2427"
2428}
2429
2430@unpublished{NeilBrown2011MeetTheLockers
2431,Author="Neil Brown"
2432,Title="Meet the Lockers"
2433,month="August"
2434,day="3"
2435,year="2011"
2436,note="Available:
2437\url{http://lwn.net/Articles/453685/}
2438[Viewed September 2, 2011]"
2439,annotation="
2440 The Locker family as an analogy for locking, reference counting,
2441 RCU, and seqlock.
2442"
2443}
2444
2445@article{MathieuDesnoyers2012URCU
2446,Author="Mathieu Desnoyers and Paul E. McKenney and Alan Stern and Michel R. Dagenais and Jonathan Walpole"
2447,Title="User-Level Implementations of Read-Copy Update"
2448,journal="IEEE Transactions on Parallel and Distributed Systems"
2449,volume={23}
2450,year="2012"
2451,issn="1045-9219"
2452,pages="375-382"
2453,doi="http://doi.ieeecomputersociety.org/10.1109/TPDS.2011.159"
2454,publisher="IEEE Computer Society"
2455,address="Los Alamitos, CA, USA"
2456,annotation={
2457 RCU overview, desiderata, semi-formal semantics, user-level RCU
2458 usage scenarios, three classes of RCU implementation, wait-free
2459 RCU updates, RCU grace-period batching, update overhead,
2460 http://www.rdrop.com/users/paulmck/RCU/urcu-main-accepted.2011.08.30a.pdf
2461 http://www.rdrop.com/users/paulmck/RCU/urcu-supp-accepted.2011.08.30a.pdf
2462}
875} 2463}
diff --git a/Documentation/RCU/checklist.txt b/Documentation/RCU/checklist.txt
index bff2d8be1e18..5c8d74968090 100644
--- a/Documentation/RCU/checklist.txt
+++ b/Documentation/RCU/checklist.txt
@@ -180,6 +180,20 @@ over a rather long period of time, but improvements are always welcome!
180 operations that would not normally be undertaken while a real-time 180 operations that would not normally be undertaken while a real-time
181 workload is running. 181 workload is running.
182 182
183 In particular, if you find yourself invoking one of the expedited
184 primitives repeatedly in a loop, please do everyone a favor:
185 Restructure your code so that it batches the updates, allowing
186 a single non-expedited primitive to cover the entire batch.
187 This will very likely be faster than the loop containing the
 188	operations that would not normally be undertaken while a real-time	expedited primitive, and will be much easier on the rest
 189	of the system, especially on any real-time workloads
 190	running there.
191
192 In addition, it is illegal to call the expedited forms from
193 a CPU-hotplug notifier, or while holding a lock that is acquired
194 by a CPU-hotplug notifier. Failing to observe this restriction
195 will result in deadlock.
196
1837. If the updater uses call_rcu() or synchronize_rcu(), then the 1977. If the updater uses call_rcu() or synchronize_rcu(), then the
184 corresponding readers must use rcu_read_lock() and 198 corresponding readers must use rcu_read_lock() and
185 rcu_read_unlock(). If the updater uses call_rcu_bh() or 199 rcu_read_unlock(). If the updater uses call_rcu_bh() or
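
An editorial sketch of the batching advice above (not part of the patch;
"struct foo", "foo_list", and the update-side locking are hypothetical,
while the list and grace-period primitives are the stock RCU API):

	struct foo {
		struct list_head entry;		/* on foo_list, RCU-protected */
		struct list_head free_list;	/* for batched freeing */
	};
	struct foo *p, *n;

	/* Anti-pattern: one expedited grace period per element. */
	list_for_each_entry_safe(p, n, &foo_list, entry) {
		list_del_rcu(&p->entry);
		synchronize_rcu_expedited();	/* hammers all CPUs each pass */
		kfree(p);
	}

	/* Preferred: batch the removals under a single grace period. */
	LIST_HEAD(tofree);

	list_for_each_entry_safe(p, n, &foo_list, entry) {
		list_del_rcu(&p->entry);
		list_add(&p->free_list, &tofree);
	}
	synchronize_rcu();			/* one grace period covers all */
	list_for_each_entry_safe(p, n, &tofree, free_list)
		kfree(p);
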
diff --git a/Documentation/RCU/stallwarn.txt b/Documentation/RCU/stallwarn.txt
index 083d88cbc089..523364e4e1f1 100644
--- a/Documentation/RCU/stallwarn.txt
+++ b/Documentation/RCU/stallwarn.txt
@@ -12,14 +12,38 @@ CONFIG_RCU_CPU_STALL_TIMEOUT
12 This kernel configuration parameter defines the period of time 12 This kernel configuration parameter defines the period of time
13 that RCU will wait from the beginning of a grace period until it 13 that RCU will wait from the beginning of a grace period until it
14 issues an RCU CPU stall warning. This time period is normally 14 issues an RCU CPU stall warning. This time period is normally
15 ten seconds. 15 sixty seconds.
16 16
17RCU_SECONDS_TILL_STALL_RECHECK	 17	This configuration parameter may be changed at runtime via
 18	/sys/module/rcutree/parameters/rcu_cpu_stall_timeout; however,
19 this parameter is checked only at the beginning of a cycle.
20 So if you are 30 seconds into a 70-second stall, setting this
21 sysfs parameter to (say) five will shorten the timeout for the
22 -next- stall, or the following warning for the current stall
23 (assuming the stall lasts long enough). It will not affect the
24 timing of the next warning for the current stall.
18 25
19 This macro defines the period of time that RCU will wait after 26 Stall-warning messages may be enabled and disabled completely via
20 issuing a stall warning until it issues another stall warning 27 /sys/module/rcutree/parameters/rcu_cpu_stall_suppress.
21 for the same stall. This time period is normally set to three 28
22 times the check interval plus thirty seconds. 29CONFIG_RCU_CPU_STALL_VERBOSE
30
31 This kernel configuration parameter causes the stall warning to
32 also dump the stacks of any tasks that are blocking the current
33 RCU-preempt grace period.
34
 35CONFIG_RCU_CPU_STALL_INFO
36
37 This kernel configuration parameter causes the stall warning to
38 print out additional per-CPU diagnostic information, including
39 information on scheduling-clock ticks and RCU's idle-CPU tracking.
40
41RCU_STALL_DELAY_DELTA
42
43 Although the lockdep facility is extremely useful, it does add
44 some overhead. Therefore, under CONFIG_PROVE_RCU, the
45 RCU_STALL_DELAY_DELTA macro allows five extra seconds before
46 giving an RCU CPU stall warning message.
23 47
24RCU_STALL_RAT_DELAY 48RCU_STALL_RAT_DELAY
25 49
@@ -64,6 +88,54 @@ INFO: rcu_bh_state detected stalls on CPUs/tasks: { } (detected by 4, 2502 jiffi
64 88
65This is rare, but does happen from time to time in real life. 89This is rare, but does happen from time to time in real life.
66 90
91If the CONFIG_RCU_CPU_STALL_INFO kernel configuration parameter is set,
92more information is printed with the stall-warning message, for example:
93
94 INFO: rcu_preempt detected stall on CPU
95 0: (63959 ticks this GP) idle=241/3fffffffffffffff/0
96 (t=65000 jiffies)
97
98In kernels with CONFIG_RCU_FAST_NO_HZ, even more information is
99printed:
100
101 INFO: rcu_preempt detected stall on CPU
102 0: (64628 ticks this GP) idle=dd5/3fffffffffffffff/0 drain=0 . timer=-1
103 (t=65000 jiffies)
104
105The "(64628 ticks this GP)" indicates that this CPU has taken more
106than 64,000 scheduling-clock interrupts during the current stalled
107grace period. If the CPU was not yet aware of the current grace
108period (for example, if it was offline), then this part of the message
 109indicates how many grace periods the CPU is behind.
110
111The "idle=" portion of the message prints the dyntick-idle state.
112The hex number before the first "/" is the low-order 12 bits of the
113dynticks counter, which will have an even-numbered value if the CPU is
114in dyntick-idle mode and an odd-numbered value otherwise. The hex
 115number between the two "/"s is the nesting depth, which will
116be a small positive number if in the idle loop and a very large positive
117number (as shown above) otherwise.
118
119For CONFIG_RCU_FAST_NO_HZ kernels, the "drain=0" indicates that the
120CPU is not in the process of trying to force itself into dyntick-idle
121state, the "." indicates that the CPU has not given up forcing RCU
122into dyntick-idle mode (it would be "H" otherwise), and the "timer=-1"
 123indicates that the CPU has not recently forced RCU into dyntick-idle
124mode (it would otherwise indicate the number of microseconds remaining
125in this forced state).
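
The parity rule above can be sketched in two lines of C ("dynticks_snap"
is a hypothetical name for the hex value before the first "/"):

	unsigned long dynticks_snap = 0x241;	    /* from "idle=241/..." */
	bool dyntick_idle = !(dynticks_snap & 0x1); /* even => dyntick-idle;
						     * 0x241 is odd, so this
						     * CPU was not idle. */
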
126
127
128Multiple Warnings From One Stall
129
130If a stall lasts long enough, multiple stall-warning messages will be
131printed for it. The second and subsequent messages are printed at
132longer intervals, so that the time between (say) the first and second
133message will be about three times the interval between the beginning
134of the stall and the first message.
135
136
137What Causes RCU CPU Stall Warnings?
138
67So your kernel printed an RCU CPU stall warning. The next question is 139So your kernel printed an RCU CPU stall warning. The next question is
68"What caused it?" The following problems can result in RCU CPU stall 140"What caused it?" The following problems can result in RCU CPU stall
69warnings: 141warnings:
@@ -128,4 +200,5 @@ is occurring, which will usually be in the function nearest the top of
128that portion of the stack which remains the same from trace to trace. 200that portion of the stack which remains the same from trace to trace.
129If you can reliably trigger the stall, ftrace can be quite helpful. 201If you can reliably trigger the stall, ftrace can be quite helpful.
130 202
131RCU bugs can often be debugged with the help of CONFIG_RCU_TRACE. 203RCU bugs can often be debugged with the help of CONFIG_RCU_TRACE
204and with RCU's event tracing.
diff --git a/Documentation/RCU/torture.txt b/Documentation/RCU/torture.txt
index d67068d0d2b9..375d3fb71437 100644
--- a/Documentation/RCU/torture.txt
+++ b/Documentation/RCU/torture.txt
@@ -69,6 +69,13 @@ onoff_interval
69 CPU-hotplug operations regardless of what value is 69 CPU-hotplug operations regardless of what value is
70 specified for onoff_interval. 70 specified for onoff_interval.
71 71
72onoff_holdoff The number of seconds to wait until starting CPU-hotplug
73 operations. This would normally only be used when
74 rcutorture was built into the kernel and started
 75		automatically at boot time, in which case it helps
 76		avoid confusing boot-time code with CPUs coming
 77		and going.
78
72shuffle_interval 79shuffle_interval
73 The number of seconds to keep the test threads affinitied 80 The number of seconds to keep the test threads affinitied
74 to a particular subset of the CPUs, defaults to 3 seconds. 81 to a particular subset of the CPUs, defaults to 3 seconds.
@@ -79,6 +86,24 @@ shutdown_secs The number of seconds to run the test before terminating
79 zero, which disables test termination and system shutdown. 86 zero, which disables test termination and system shutdown.
80 This capability is useful for automated testing. 87 This capability is useful for automated testing.
81 88
89stall_cpu The number of seconds that a CPU should be stalled while
90 within both an rcu_read_lock() and a preempt_disable().
91 This stall happens only once per rcutorture run.
92 If you need multiple stalls, use modprobe and rmmod to
93 repeatedly run rcutorture. The default for stall_cpu
94 is zero, which prevents rcutorture from stalling a CPU.
95
96 Note that attempts to rmmod rcutorture while the stall
97 is ongoing will hang, so be careful what value you
98 choose for this module parameter! In addition, too-large
99 values for stall_cpu might well induce failures and
100 warnings in other parts of the kernel. You have been
101 warned!
102
103stall_cpu_holdoff
104 The number of seconds to wait after rcutorture starts
105 before stalling a CPU. Defaults to 10 seconds.
106
82stat_interval The number of seconds between output of torture 107stat_interval The number of seconds between output of torture
83 statistics (via printk()). Regardless of the interval, 108 statistics (via printk()). Regardless of the interval,
84 statistics are printed when the module is unloaded. 109 statistics are printed when the module is unloaded.
@@ -271,11 +296,13 @@ The following script may be used to torture RCU:
271 #!/bin/sh 296 #!/bin/sh
272 297
273 modprobe rcutorture 298 modprobe rcutorture
274 sleep 100 299 sleep 3600
275 rmmod rcutorture 300 rmmod rcutorture
276 dmesg | grep torture: 301 dmesg | grep torture:
277 302
278The output can be manually inspected for the error flag of "!!!". 303The output can be manually inspected for the error flag of "!!!".
279One could of course create a more elaborate script that automatically 304One could of course create a more elaborate script that automatically
280checked for such errors. The "rmmod" command forces a "SUCCESS" or 305checked for such errors. The "rmmod" command forces a "SUCCESS",
281"FAILURE" indication to be printk()ed. 306"FAILURE", or "RCU_HOTPLUG" indication to be printk()ed. The first
307two are self-explanatory, while the last indicates that while there
308were no RCU failures, CPU-hotplug problems were detected.
diff --git a/Documentation/RCU/trace.txt b/Documentation/RCU/trace.txt
index 49587abfc2f7..f6f15ce39903 100644
--- a/Documentation/RCU/trace.txt
+++ b/Documentation/RCU/trace.txt
@@ -33,23 +33,23 @@ rcu/rcuboost:
33The output of "cat rcu/rcudata" looks as follows: 33The output of "cat rcu/rcudata" looks as follows:
34 34
35rcu_sched: 35rcu_sched:
36 0 c=20972 g=20973 pq=1 pgp=20973 qp=0 dt=545/1/0 df=50 of=0 ri=0 ql=163 qs=NRW. kt=0/W/0 ktl=ebc3 b=10 ci=153737 co=0 ca=0 36 0 c=20972 g=20973 pq=1 pgp=20973 qp=0 dt=545/1/0 df=50 of=0 ql=163 qs=NRW. kt=0/W/0 ktl=ebc3 b=10 ci=153737 co=0 ca=0
37 1 c=20972 g=20973 pq=1 pgp=20973 qp=0 dt=967/1/0 df=58 of=0 ri=0 ql=634 qs=NRW. kt=0/W/1 ktl=58c b=10 ci=191037 co=0 ca=0 37 1 c=20972 g=20973 pq=1 pgp=20973 qp=0 dt=967/1/0 df=58 of=0 ql=634 qs=NRW. kt=0/W/1 ktl=58c b=10 ci=191037 co=0 ca=0
38 2 c=20972 g=20973 pq=1 pgp=20973 qp=0 dt=1081/1/0 df=175 of=0 ri=0 ql=74 qs=N.W. kt=0/W/2 ktl=da94 b=10 ci=75991 co=0 ca=0 38 2 c=20972 g=20973 pq=1 pgp=20973 qp=0 dt=1081/1/0 df=175 of=0 ql=74 qs=N.W. kt=0/W/2 ktl=da94 b=10 ci=75991 co=0 ca=0
39 3 c=20942 g=20943 pq=1 pgp=20942 qp=1 dt=1846/0/0 df=404 of=0 ri=0 ql=0 qs=.... kt=0/W/3 ktl=d1cd b=10 ci=72261 co=0 ca=0 39 3 c=20942 g=20943 pq=1 pgp=20942 qp=1 dt=1846/0/0 df=404 of=0 ql=0 qs=.... kt=0/W/3 ktl=d1cd b=10 ci=72261 co=0 ca=0
40 4 c=20972 g=20973 pq=1 pgp=20973 qp=0 dt=369/1/0 df=83 of=0 ri=0 ql=48 qs=N.W. kt=0/W/4 ktl=e0e7 b=10 ci=128365 co=0 ca=0 40 4 c=20972 g=20973 pq=1 pgp=20973 qp=0 dt=369/1/0 df=83 of=0 ql=48 qs=N.W. kt=0/W/4 ktl=e0e7 b=10 ci=128365 co=0 ca=0
41 5 c=20972 g=20973 pq=1 pgp=20973 qp=0 dt=381/1/0 df=64 of=0 ri=0 ql=169 qs=NRW. kt=0/W/5 ktl=fb2f b=10 ci=164360 co=0 ca=0 41 5 c=20972 g=20973 pq=1 pgp=20973 qp=0 dt=381/1/0 df=64 of=0 ql=169 qs=NRW. kt=0/W/5 ktl=fb2f b=10 ci=164360 co=0 ca=0
42 6 c=20972 g=20973 pq=1 pgp=20973 qp=0 dt=1037/1/0 df=183 of=0 ri=0 ql=62 qs=N.W. kt=0/W/6 ktl=d2ad b=10 ci=65663 co=0 ca=0 42 6 c=20972 g=20973 pq=1 pgp=20973 qp=0 dt=1037/1/0 df=183 of=0 ql=62 qs=N.W. kt=0/W/6 ktl=d2ad b=10 ci=65663 co=0 ca=0
43 7 c=20897 g=20897 pq=1 pgp=20896 qp=0 dt=1572/0/0 df=382 of=0 ri=0 ql=0 qs=.... kt=0/W/7 ktl=cf15 b=10 ci=75006 co=0 ca=0 43 7 c=20897 g=20897 pq=1 pgp=20896 qp=0 dt=1572/0/0 df=382 of=0 ql=0 qs=.... kt=0/W/7 ktl=cf15 b=10 ci=75006 co=0 ca=0
44rcu_bh: 44rcu_bh:
45 0 c=1480 g=1480 pq=1 pgp=1480 qp=0 dt=545/1/0 df=6 of=0 ri=1 ql=0 qs=.... kt=0/W/0 ktl=ebc3 b=10 ci=0 co=0 ca=0 45 0 c=1480 g=1480 pq=1 pgp=1480 qp=0 dt=545/1/0 df=6 of=0 ql=0 qs=.... kt=0/W/0 ktl=ebc3 b=10 ci=0 co=0 ca=0
46 1 c=1480 g=1480 pq=1 pgp=1480 qp=0 dt=967/1/0 df=3 of=0 ri=1 ql=0 qs=.... kt=0/W/1 ktl=58c b=10 ci=151 co=0 ca=0 46 1 c=1480 g=1480 pq=1 pgp=1480 qp=0 dt=967/1/0 df=3 of=0 ql=0 qs=.... kt=0/W/1 ktl=58c b=10 ci=151 co=0 ca=0
47 2 c=1480 g=1480 pq=1 pgp=1480 qp=0 dt=1081/1/0 df=6 of=0 ri=1 ql=0 qs=.... kt=0/W/2 ktl=da94 b=10 ci=0 co=0 ca=0 47 2 c=1480 g=1480 pq=1 pgp=1480 qp=0 dt=1081/1/0 df=6 of=0 ql=0 qs=.... kt=0/W/2 ktl=da94 b=10 ci=0 co=0 ca=0
48 3 c=1480 g=1480 pq=1 pgp=1480 qp=0 dt=1846/0/0 df=8 of=0 ri=1 ql=0 qs=.... kt=0/W/3 ktl=d1cd b=10 ci=0 co=0 ca=0 48 3 c=1480 g=1480 pq=1 pgp=1480 qp=0 dt=1846/0/0 df=8 of=0 ql=0 qs=.... kt=0/W/3 ktl=d1cd b=10 ci=0 co=0 ca=0
49 4 c=1480 g=1480 pq=1 pgp=1480 qp=0 dt=369/1/0 df=6 of=0 ri=1 ql=0 qs=.... kt=0/W/4 ktl=e0e7 b=10 ci=0 co=0 ca=0 49 4 c=1480 g=1480 pq=1 pgp=1480 qp=0 dt=369/1/0 df=6 of=0 ql=0 qs=.... kt=0/W/4 ktl=e0e7 b=10 ci=0 co=0 ca=0
50 5 c=1480 g=1480 pq=1 pgp=1480 qp=0 dt=381/1/0 df=4 of=0 ri=1 ql=0 qs=.... kt=0/W/5 ktl=fb2f b=10 ci=0 co=0 ca=0 50 5 c=1480 g=1480 pq=1 pgp=1480 qp=0 dt=381/1/0 df=4 of=0 ql=0 qs=.... kt=0/W/5 ktl=fb2f b=10 ci=0 co=0 ca=0
51 6 c=1480 g=1480 pq=1 pgp=1480 qp=0 dt=1037/1/0 df=6 of=0 ri=1 ql=0 qs=.... kt=0/W/6 ktl=d2ad b=10 ci=0 co=0 ca=0 51 6 c=1480 g=1480 pq=1 pgp=1480 qp=0 dt=1037/1/0 df=6 of=0 ql=0 qs=.... kt=0/W/6 ktl=d2ad b=10 ci=0 co=0 ca=0
52 7 c=1474 g=1474 pq=1 pgp=1473 qp=0 dt=1572/0/0 df=8 of=0 ri=1 ql=0 qs=.... kt=0/W/7 ktl=cf15 b=10 ci=0 co=0 ca=0 52 7 c=1474 g=1474 pq=1 pgp=1473 qp=0 dt=1572/0/0 df=8 of=0 ql=0 qs=.... kt=0/W/7 ktl=cf15 b=10 ci=0 co=0 ca=0
53 53
54The first section lists the rcu_data structures for rcu_sched, the second 54The first section lists the rcu_data structures for rcu_sched, the second
55for rcu_bh. Note that CONFIG_TREE_PREEMPT_RCU kernels will have an 55for rcu_bh. Note that CONFIG_TREE_PREEMPT_RCU kernels will have an
@@ -119,10 +119,6 @@ o "of" is the number of times that some other CPU has forced a
119 CPU is offline when it is really alive and kicking) is a fatal 119 CPU is offline when it is really alive and kicking) is a fatal
120 error, so it makes sense to err conservatively. 120 error, so it makes sense to err conservatively.
121 121
122o "ri" is the number of times that RCU has seen fit to send a
123 reschedule IPI to this CPU in order to get it to report a
124 quiescent state.
125
126o "ql" is the number of RCU callbacks currently residing on 122o "ql" is the number of RCU callbacks currently residing on
127 this CPU. This is the total number of callbacks, regardless 123 this CPU. This is the total number of callbacks, regardless
128 of what state they are in (new, waiting for grace period to 124 of what state they are in (new, waiting for grace period to
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index b9a7fdd9c814..e30b2dfa8ba0 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -165,13 +165,6 @@ static inline int ext_hash(u16 code)
165 return (code + (code >> 9)) & 0xff; 165 return (code + (code >> 9)) & 0xff;
166} 166}
167 167
168static void ext_int_hash_update(struct rcu_head *head)
169{
170 struct ext_int_info *p = container_of(head, struct ext_int_info, rcu);
171
172 kfree(p);
173}
174
175int register_external_interrupt(u16 code, ext_int_handler_t handler) 168int register_external_interrupt(u16 code, ext_int_handler_t handler)
176{ 169{
177 struct ext_int_info *p; 170 struct ext_int_info *p;
@@ -202,7 +195,7 @@ int unregister_external_interrupt(u16 code, ext_int_handler_t handler)
202 list_for_each_entry_rcu(p, &ext_int_hash[index], entry) 195 list_for_each_entry_rcu(p, &ext_int_hash[index], entry)
203 if (p->code == code && p->handler == handler) { 196 if (p->code == code && p->handler == handler) {
204 list_del_rcu(&p->entry); 197 list_del_rcu(&p->entry);
205 call_rcu(&p->rcu, ext_int_hash_update); 198 kfree_rcu(p, rcu);
206 } 199 }
207 spin_unlock_irqrestore(&ext_int_hash_lock, flags); 200 spin_unlock_irqrestore(&ext_int_hash_lock, flags);
208 return 0; 201 return 0;
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index 4c0507cf808c..eff512b5a2a0 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -86,16 +86,6 @@ static struct ft_tport *ft_tport_create(struct fc_lport *lport)
86} 86}
87 87
88/* 88/*
89 * Free tport via RCU.
90 */
91static void ft_tport_rcu_free(struct rcu_head *rcu)
92{
93 struct ft_tport *tport = container_of(rcu, struct ft_tport, rcu);
94
95 kfree(tport);
96}
97
98/*
99 * Delete a target local port. 89 * Delete a target local port.
100 * Caller holds ft_lport_lock. 90 * Caller holds ft_lport_lock.
101 */ 91 */
@@ -114,7 +104,7 @@ static void ft_tport_delete(struct ft_tport *tport)
114 tpg->tport = NULL; 104 tpg->tport = NULL;
115 tport->tpg = NULL; 105 tport->tpg = NULL;
116 } 106 }
117 call_rcu(&tport->rcu, ft_tport_rcu_free); 107 kfree_rcu(tport, rcu);
118} 108}
119 109
120/* 110/*
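
Both conversions above are instances of the same idiom; as a generic
sketch ("struct foo" and its "rcu" field are stand-ins), kfree_rcu()
replaces a call_rcu() callback whose only job is container_of() plus
kfree():

	struct foo {
		struct rcu_head rcu;
		/* ... payload ... */
	};

	/* Before: a dedicated callback exists only to free the object. */
	static void foo_rcu_free(struct rcu_head *head)
	{
		struct foo *p = container_of(head, struct foo, rcu);

		kfree(p);
	}

	/* ... in the updater ... */
		call_rcu(&p->rcu, foo_rcu_free);

	/* After: the rcu_head field's offset stands in for the callback. */
		kfree_rcu(p, rcu);
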
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 81c04f4348ec..937217425c47 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -190,6 +190,33 @@ extern void rcu_idle_exit(void);
190extern void rcu_irq_enter(void); 190extern void rcu_irq_enter(void);
191extern void rcu_irq_exit(void); 191extern void rcu_irq_exit(void);
192 192
193/**
194 * RCU_NONIDLE - Indicate idle-loop code that needs RCU readers
195 * @a: Code that RCU needs to pay attention to.
196 *
197 * RCU, RCU-bh, and RCU-sched read-side critical sections are forbidden
198 * in the inner idle loop, that is, between the rcu_idle_enter() and
199 * the rcu_idle_exit() -- RCU will happily ignore any such read-side
200 * critical sections. However, things like powertop need tracepoints
201 * in the inner idle loop.
202 *
203 * This macro provides the way out: RCU_NONIDLE(do_something_with_RCU())
 204 * will tell RCU that it needs to pay attention, invoke its argument
205 * (in this example, a call to the do_something_with_RCU() function),
206 * and then tell RCU to go back to ignoring this CPU. It is permissible
207 * to nest RCU_NONIDLE() wrappers, but the nesting level is currently
208 * quite limited. If deeper nesting is required, it will be necessary
209 * to adjust DYNTICK_TASK_NESTING_VALUE accordingly.
210 *
211 * This macro may be used from process-level code only.
212 */
213#define RCU_NONIDLE(a) \
214 do { \
215 rcu_idle_exit(); \
216 do { a; } while (0); \
217 rcu_idle_enter(); \
218 } while (0)
219
193/* 220/*
194 * Infrastructure to implement the synchronize_() primitives in 221 * Infrastructure to implement the synchronize_() primitives in
195 * TREE_RCU and rcu_barrier_() primitives in TINY_RCU. 222 * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
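
A minimal usage sketch for the RCU_NONIDLE() wrapper added above;
trace_my_idle_event() is a hypothetical tracepoint standing in for
whatever the idle loop must run under RCU protection:

	/*
	 * In process context, between rcu_idle_enter() and rcu_idle_exit(),
	 * where ordinary RCU read-side critical sections are forbidden:
	 */
	RCU_NONIDLE(trace_my_idle_event(smp_processor_id()));
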
@@ -226,6 +253,15 @@ static inline void destroy_rcu_head_on_stack(struct rcu_head *head)
226} 253}
227#endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ 254#endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
228 255
256#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU)
257bool rcu_lockdep_current_cpu_online(void);
258#else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
259static inline bool rcu_lockdep_current_cpu_online(void)
260{
261 return 1;
262}
263#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
264
229#ifdef CONFIG_DEBUG_LOCK_ALLOC 265#ifdef CONFIG_DEBUG_LOCK_ALLOC
230 266
231#ifdef CONFIG_PROVE_RCU 267#ifdef CONFIG_PROVE_RCU
@@ -239,13 +275,11 @@ static inline int rcu_is_cpu_idle(void)
239 275
240static inline void rcu_lock_acquire(struct lockdep_map *map) 276static inline void rcu_lock_acquire(struct lockdep_map *map)
241{ 277{
242 WARN_ON_ONCE(rcu_is_cpu_idle());
243 lock_acquire(map, 0, 0, 2, 1, NULL, _THIS_IP_); 278 lock_acquire(map, 0, 0, 2, 1, NULL, _THIS_IP_);
244} 279}
245 280
246static inline void rcu_lock_release(struct lockdep_map *map) 281static inline void rcu_lock_release(struct lockdep_map *map)
247{ 282{
248 WARN_ON_ONCE(rcu_is_cpu_idle());
249 lock_release(map, 1, _THIS_IP_); 283 lock_release(map, 1, _THIS_IP_);
250} 284}
251 285
@@ -270,6 +304,9 @@ extern int debug_lockdep_rcu_enabled(void);
270 * occur in the same context, for example, it is illegal to invoke 304 * occur in the same context, for example, it is illegal to invoke
271 * rcu_read_unlock() in process context if the matching rcu_read_lock() 305 * rcu_read_unlock() in process context if the matching rcu_read_lock()
272 * was invoked from within an irq handler. 306 * was invoked from within an irq handler.
307 *
308 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
309 * offline from an RCU perspective, so check for those as well.
273 */ 310 */
274static inline int rcu_read_lock_held(void) 311static inline int rcu_read_lock_held(void)
275{ 312{
@@ -277,6 +314,8 @@ static inline int rcu_read_lock_held(void)
277 return 1; 314 return 1;
278 if (rcu_is_cpu_idle()) 315 if (rcu_is_cpu_idle())
279 return 0; 316 return 0;
317 if (!rcu_lockdep_current_cpu_online())
318 return 0;
280 return lock_is_held(&rcu_lock_map); 319 return lock_is_held(&rcu_lock_map);
281} 320}
282 321
@@ -313,6 +352,9 @@ extern int rcu_read_lock_bh_held(void);
313 * notice an extended quiescent state to other CPUs that started a grace 352 * notice an extended quiescent state to other CPUs that started a grace
314 * period. Otherwise we would delay any grace period as long as we run in 353 * period. Otherwise we would delay any grace period as long as we run in
315 * the idle task. 354 * the idle task.
355 *
 356 * Similarly, we avoid claiming an RCU-sched read lock held if the current
357 * CPU is offline.
316 */ 358 */
317#ifdef CONFIG_PREEMPT_COUNT 359#ifdef CONFIG_PREEMPT_COUNT
318static inline int rcu_read_lock_sched_held(void) 360static inline int rcu_read_lock_sched_held(void)
@@ -323,6 +365,8 @@ static inline int rcu_read_lock_sched_held(void)
323 return 1; 365 return 1;
324 if (rcu_is_cpu_idle()) 366 if (rcu_is_cpu_idle())
325 return 0; 367 return 0;
368 if (!rcu_lockdep_current_cpu_online())
369 return 0;
326 if (debug_locks) 370 if (debug_locks)
327 lockdep_opinion = lock_is_held(&rcu_sched_lock_map); 371 lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
328 return lockdep_opinion || preempt_count() != 0 || irqs_disabled(); 372 return lockdep_opinion || preempt_count() != 0 || irqs_disabled();
@@ -381,8 +425,22 @@ extern int rcu_my_thread_group_empty(void);
381 } \ 425 } \
382 } while (0) 426 } while (0)
383 427
428#if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU)
429static inline void rcu_preempt_sleep_check(void)
430{
431 rcu_lockdep_assert(!lock_is_held(&rcu_lock_map),
432 "Illegal context switch in RCU read-side "
433 "critical section");
434}
435#else /* #ifdef CONFIG_PROVE_RCU */
436static inline void rcu_preempt_sleep_check(void)
437{
438}
439#endif /* #else #ifdef CONFIG_PROVE_RCU */
440
384#define rcu_sleep_check() \ 441#define rcu_sleep_check() \
385 do { \ 442 do { \
443 rcu_preempt_sleep_check(); \
386 rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map), \ 444 rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map), \
387 "Illegal context switch in RCU-bh" \ 445 "Illegal context switch in RCU-bh" \
388 " read-side critical section"); \ 446 " read-side critical section"); \
@@ -470,6 +528,13 @@ extern int rcu_my_thread_group_empty(void);
470 * NULL. Although rcu_access_pointer() may also be used in cases where 528 * NULL. Although rcu_access_pointer() may also be used in cases where
471 * update-side locks prevent the value of the pointer from changing, you 529 * update-side locks prevent the value of the pointer from changing, you
472 * should instead use rcu_dereference_protected() for this use case. 530 * should instead use rcu_dereference_protected() for this use case.
531 *
532 * It is also permissible to use rcu_access_pointer() when read-side
533 * access to the pointer was removed at least one grace period ago, as
534 * is the case in the context of the RCU callback that is freeing up
535 * the data, or after a synchronize_rcu() returns. This can be useful
536 * when tearing down multi-linked structures after a grace period
537 * has elapsed.
473 */ 538 */
474#define rcu_access_pointer(p) __rcu_access_pointer((p), __rcu) 539#define rcu_access_pointer(p) __rcu_access_pointer((p), __rcu)
475 540
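
The teardown case described in the new rcu_access_pointer() comment, as
a sketch ("struct foo" and its "child" pointer are hypothetical):

	struct bar;			/* defined elsewhere */

	struct foo {
		struct bar __rcu *child;
		struct rcu_head rcu;
	};

	static void foo_free_rcu(struct rcu_head *head)
	{
		struct foo *p = container_of(head, struct foo, rcu);

		/*
		 * Readers were cut off at least one grace period ago,
		 * so plain rcu_access_pointer() suffices here.
		 */
		kfree(rcu_access_pointer(p->child));
		kfree(p);
	}
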
@@ -659,6 +724,8 @@ static inline void rcu_read_lock(void)
659 __rcu_read_lock(); 724 __rcu_read_lock();
660 __acquire(RCU); 725 __acquire(RCU);
661 rcu_lock_acquire(&rcu_lock_map); 726 rcu_lock_acquire(&rcu_lock_map);
727 rcu_lockdep_assert(!rcu_is_cpu_idle(),
728 "rcu_read_lock() used illegally while idle");
662} 729}
663 730
664/* 731/*
@@ -678,6 +745,8 @@ static inline void rcu_read_lock(void)
678 */ 745 */
679static inline void rcu_read_unlock(void) 746static inline void rcu_read_unlock(void)
680{ 747{
748 rcu_lockdep_assert(!rcu_is_cpu_idle(),
749 "rcu_read_unlock() used illegally while idle");
681 rcu_lock_release(&rcu_lock_map); 750 rcu_lock_release(&rcu_lock_map);
682 __release(RCU); 751 __release(RCU);
683 __rcu_read_unlock(); 752 __rcu_read_unlock();
@@ -705,6 +774,8 @@ static inline void rcu_read_lock_bh(void)
705 local_bh_disable(); 774 local_bh_disable();
706 __acquire(RCU_BH); 775 __acquire(RCU_BH);
707 rcu_lock_acquire(&rcu_bh_lock_map); 776 rcu_lock_acquire(&rcu_bh_lock_map);
777 rcu_lockdep_assert(!rcu_is_cpu_idle(),
778 "rcu_read_lock_bh() used illegally while idle");
708} 779}
709 780
710/* 781/*
@@ -714,6 +785,8 @@ static inline void rcu_read_lock_bh(void)
714 */ 785 */
715static inline void rcu_read_unlock_bh(void) 786static inline void rcu_read_unlock_bh(void)
716{ 787{
788 rcu_lockdep_assert(!rcu_is_cpu_idle(),
789 "rcu_read_unlock_bh() used illegally while idle");
717 rcu_lock_release(&rcu_bh_lock_map); 790 rcu_lock_release(&rcu_bh_lock_map);
718 __release(RCU_BH); 791 __release(RCU_BH);
719 local_bh_enable(); 792 local_bh_enable();
@@ -737,6 +810,8 @@ static inline void rcu_read_lock_sched(void)
737 preempt_disable(); 810 preempt_disable();
738 __acquire(RCU_SCHED); 811 __acquire(RCU_SCHED);
739 rcu_lock_acquire(&rcu_sched_lock_map); 812 rcu_lock_acquire(&rcu_sched_lock_map);
813 rcu_lockdep_assert(!rcu_is_cpu_idle(),
814 "rcu_read_lock_sched() used illegally while idle");
740} 815}
741 816
742/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */ 817/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
@@ -753,6 +828,8 @@ static inline notrace void rcu_read_lock_sched_notrace(void)
753 */ 828 */
754static inline void rcu_read_unlock_sched(void) 829static inline void rcu_read_unlock_sched(void)
755{ 830{
831 rcu_lockdep_assert(!rcu_is_cpu_idle(),
832 "rcu_read_unlock_sched() used illegally while idle");
756 rcu_lock_release(&rcu_sched_lock_map); 833 rcu_lock_release(&rcu_sched_lock_map);
757 __release(RCU_SCHED); 834 __release(RCU_SCHED);
758 preempt_enable(); 835 preempt_enable();
@@ -841,7 +918,7 @@ void __kfree_rcu(struct rcu_head *head, unsigned long offset)
841 /* See the kfree_rcu() header comment. */ 918 /* See the kfree_rcu() header comment. */
842 BUILD_BUG_ON(!__is_kfree_rcu_offset(offset)); 919 BUILD_BUG_ON(!__is_kfree_rcu_offset(offset));
843 920
844 call_rcu(head, (rcu_callback)offset); 921 kfree_call_rcu(head, (rcu_callback)offset);
845} 922}
846 923
847/** 924/**
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 00b7a5e493d2..e93df77176d1 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -27,13 +27,9 @@
27 27
28#include <linux/cache.h> 28#include <linux/cache.h>
29 29
30#ifdef CONFIG_RCU_BOOST
31static inline void rcu_init(void) 30static inline void rcu_init(void)
32{ 31{
33} 32}
34#else /* #ifdef CONFIG_RCU_BOOST */
35void rcu_init(void);
36#endif /* #else #ifdef CONFIG_RCU_BOOST */
37 33
38static inline void rcu_barrier_bh(void) 34static inline void rcu_barrier_bh(void)
39{ 35{
@@ -83,6 +79,12 @@ static inline void synchronize_sched_expedited(void)
83 synchronize_sched(); 79 synchronize_sched();
84} 80}
85 81
82static inline void kfree_call_rcu(struct rcu_head *head,
83 void (*func)(struct rcu_head *rcu))
84{
85 call_rcu(head, func);
86}
87
86#ifdef CONFIG_TINY_RCU 88#ifdef CONFIG_TINY_RCU
87 89
88static inline void rcu_preempt_note_context_switch(void) 90static inline void rcu_preempt_note_context_switch(void)
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 67458468f1a8..e8ee5dd0854c 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -61,6 +61,24 @@ extern void synchronize_rcu_bh(void);
61extern void synchronize_sched_expedited(void); 61extern void synchronize_sched_expedited(void);
62extern void synchronize_rcu_expedited(void); 62extern void synchronize_rcu_expedited(void);
63 63
64void kfree_call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
65
66/**
67 * synchronize_rcu_bh_expedited - Brute-force RCU-bh grace period
68 *
69 * Wait for an RCU-bh grace period to elapse, but use a "big hammer"
70 * approach to force the grace period to end quickly. This consumes
71 * significant time on all CPUs and is unfriendly to real-time workloads,
72 * so is thus not recommended for any sort of common-case code. In fact,
73 * if you are using synchronize_rcu_bh_expedited() in a loop, please
74 * restructure your code to batch your updates, and then use a single
75 * synchronize_rcu_bh() instead.
76 *
77 * Note that it is illegal to call this function while holding any lock
78 * that is acquired by a CPU-hotplug notifier. And yes, it is also illegal
79 * to call this function from a CPU-hotplug notifier. Failing to observe
 80 * these restrictions will result in deadlock.
81 */
64static inline void synchronize_rcu_bh_expedited(void) 82static inline void synchronize_rcu_bh_expedited(void)
65{ 83{
66 synchronize_sched_expedited(); 84 synchronize_sched_expedited();
@@ -83,6 +101,7 @@ extern void rcu_sched_force_quiescent_state(void);
83/* A context switch is a grace period for RCU-sched and RCU-bh. */ 101/* A context switch is a grace period for RCU-sched and RCU-bh. */
84static inline int rcu_blocking_is_gp(void) 102static inline int rcu_blocking_is_gp(void)
85{ 103{
104 might_sleep(); /* Check for RCU read-side critical section. */
86 return num_online_cpus() == 1; 105 return num_online_cpus() == 1;
87} 106}
88 107
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 7d379a6bfd88..e692abaf915a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1864,8 +1864,7 @@ extern void task_clear_jobctl_pending(struct task_struct *task,
1864#ifdef CONFIG_PREEMPT_RCU 1864#ifdef CONFIG_PREEMPT_RCU
1865 1865
1866#define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */ 1866#define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
1867#define RCU_READ_UNLOCK_BOOSTED (1 << 1) /* boosted while in RCU read-side. */ 1867#define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */
1868#define RCU_READ_UNLOCK_NEED_QS (1 << 2) /* RCU core needs CPU response. */
1869 1868
1870static inline void rcu_copy_process(struct task_struct *p) 1869static inline void rcu_copy_process(struct task_struct *p)
1871{ 1870{
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index e1b005918bbb..d3d5fa54f25e 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -99,15 +99,18 @@ long srcu_batches_completed(struct srcu_struct *sp);
99 * power mode. This way we can notice an extended quiescent state to 99 * power mode. This way we can notice an extended quiescent state to
100 * other CPUs that started a grace period. Otherwise we would delay any 100 * other CPUs that started a grace period. Otherwise we would delay any
101 * grace period as long as we run in the idle task. 101 * grace period as long as we run in the idle task.
102 *
103 * Similarly, we avoid claiming an SRCU read lock held if the current
104 * CPU is offline.
102 */ 105 */
103static inline int srcu_read_lock_held(struct srcu_struct *sp) 106static inline int srcu_read_lock_held(struct srcu_struct *sp)
104{ 107{
105 if (rcu_is_cpu_idle())
106 return 0;
107
108 if (!debug_lockdep_rcu_enabled()) 108 if (!debug_lockdep_rcu_enabled())
109 return 1; 109 return 1;
110 110 if (rcu_is_cpu_idle())
111 return 0;
112 if (!rcu_lockdep_current_cpu_online())
113 return 0;
111 return lock_is_held(&sp->dep_map); 114 return lock_is_held(&sp->dep_map);
112} 115}
113 116
@@ -169,6 +172,8 @@ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
169 int retval = __srcu_read_lock(sp); 172 int retval = __srcu_read_lock(sp);
170 173
171 rcu_lock_acquire(&(sp)->dep_map); 174 rcu_lock_acquire(&(sp)->dep_map);
175 rcu_lockdep_assert(!rcu_is_cpu_idle(),
176 "srcu_read_lock() used illegally while idle");
172 return retval; 177 return retval;
173} 178}
174 179
@@ -182,6 +187,8 @@ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
182static inline void srcu_read_unlock(struct srcu_struct *sp, int idx) 187static inline void srcu_read_unlock(struct srcu_struct *sp, int idx)
183 __releases(sp) 188 __releases(sp)
184{ 189{
190 rcu_lockdep_assert(!rcu_is_cpu_idle(),
191 "srcu_read_unlock() used illegally while idle");
185 rcu_lock_release(&(sp)->dep_map); 192 rcu_lock_release(&(sp)->dep_map);
186 __srcu_read_unlock(sp, idx); 193 __srcu_read_unlock(sp, idx);
187} 194}
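
For reference, the SRCU read-side pattern that the new asserts guard
("my_srcu" and "gp" are hypothetical):

	static struct srcu_struct my_srcu;	/* init_srcu_struct() at setup */
	struct foo __rcu *gp;

	static void reader(void)
	{
		struct foo *p;
		int idx;

		idx = srcu_read_lock(&my_srcu);	/* asserts !rcu_is_cpu_idle() */
		p = srcu_dereference(gp, &my_srcu);
		/* ... read-side references to p ... */
		srcu_read_unlock(&my_srcu, idx);	/* likewise asserts */
	}
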
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index d2d88bed891b..337099783f37 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -313,19 +313,22 @@ TRACE_EVENT(rcu_prep_idle,
313/* 313/*
314 * Tracepoint for the registration of a single RCU callback function. 314 * Tracepoint for the registration of a single RCU callback function.
315 * The first argument is the type of RCU, the second argument is 315 * The first argument is the type of RCU, the second argument is
 316 * a pointer to the RCU callback itself, and the third element is the	 316 * a pointer to the RCU callback itself, the third argument is the
 317 * new RCU callback queue length for the current CPU.	 317 * number of lazy callbacks queued, and the fourth argument is the
318 * total number of callbacks queued.
318 */ 319 */
319TRACE_EVENT(rcu_callback, 320TRACE_EVENT(rcu_callback,
320 321
321 TP_PROTO(char *rcuname, struct rcu_head *rhp, long qlen), 322 TP_PROTO(char *rcuname, struct rcu_head *rhp, long qlen_lazy,
323 long qlen),
322 324
323 TP_ARGS(rcuname, rhp, qlen), 325 TP_ARGS(rcuname, rhp, qlen_lazy, qlen),
324 326
325 TP_STRUCT__entry( 327 TP_STRUCT__entry(
326 __field(char *, rcuname) 328 __field(char *, rcuname)
327 __field(void *, rhp) 329 __field(void *, rhp)
328 __field(void *, func) 330 __field(void *, func)
331 __field(long, qlen_lazy)
329 __field(long, qlen) 332 __field(long, qlen)
330 ), 333 ),
331 334
@@ -333,11 +336,13 @@ TRACE_EVENT(rcu_callback,
333 __entry->rcuname = rcuname; 336 __entry->rcuname = rcuname;
334 __entry->rhp = rhp; 337 __entry->rhp = rhp;
335 __entry->func = rhp->func; 338 __entry->func = rhp->func;
339 __entry->qlen_lazy = qlen_lazy;
336 __entry->qlen = qlen; 340 __entry->qlen = qlen;
337 ), 341 ),
338 342
339 TP_printk("%s rhp=%p func=%pf %ld", 343 TP_printk("%s rhp=%p func=%pf %ld/%ld",
340 __entry->rcuname, __entry->rhp, __entry->func, __entry->qlen) 344 __entry->rcuname, __entry->rhp, __entry->func,
345 __entry->qlen_lazy, __entry->qlen)
341); 346);
342 347
343/* 348/*
@@ -345,20 +350,21 @@ TRACE_EVENT(rcu_callback,
345 * kfree() form. The first argument is the RCU type, the second argument 350 * kfree() form. The first argument is the RCU type, the second argument
346 * is a pointer to the RCU callback, the third argument is the offset 351 * is a pointer to the RCU callback, the third argument is the offset
347 * of the callback within the enclosing RCU-protected data structure, 352 * of the callback within the enclosing RCU-protected data structure,
348 * and the fourth argument is the new RCU callback queue length for the 353 * the fourth argument is the number of lazy callbacks queued, and the
349 * current CPU. 354 * fifth argument is the total number of callbacks queued.
350 */ 355 */
351TRACE_EVENT(rcu_kfree_callback, 356TRACE_EVENT(rcu_kfree_callback,
352 357
353 TP_PROTO(char *rcuname, struct rcu_head *rhp, unsigned long offset, 358 TP_PROTO(char *rcuname, struct rcu_head *rhp, unsigned long offset,
354 long qlen), 359 long qlen_lazy, long qlen),
355 360
356 TP_ARGS(rcuname, rhp, offset, qlen), 361 TP_ARGS(rcuname, rhp, offset, qlen_lazy, qlen),
357 362
358 TP_STRUCT__entry( 363 TP_STRUCT__entry(
359 __field(char *, rcuname) 364 __field(char *, rcuname)
360 __field(void *, rhp) 365 __field(void *, rhp)
361 __field(unsigned long, offset) 366 __field(unsigned long, offset)
367 __field(long, qlen_lazy)
362 __field(long, qlen) 368 __field(long, qlen)
363 ), 369 ),
364 370
@@ -366,41 +372,45 @@ TRACE_EVENT(rcu_kfree_callback,
366 __entry->rcuname = rcuname; 372 __entry->rcuname = rcuname;
367 __entry->rhp = rhp; 373 __entry->rhp = rhp;
368 __entry->offset = offset; 374 __entry->offset = offset;
375 __entry->qlen_lazy = qlen_lazy;
369 __entry->qlen = qlen; 376 __entry->qlen = qlen;
370 ), 377 ),
371 378
372 TP_printk("%s rhp=%p func=%ld %ld", 379 TP_printk("%s rhp=%p func=%ld %ld/%ld",
373 __entry->rcuname, __entry->rhp, __entry->offset, 380 __entry->rcuname, __entry->rhp, __entry->offset,
374 __entry->qlen) 381 __entry->qlen_lazy, __entry->qlen)
375); 382);
376 383
377/* 384/*
378 * Tracepoint for marking the beginning of rcu_do_batch, performed to start 385 * Tracepoint for marking the beginning of rcu_do_batch, performed to start
379 * RCU callback invocation. The first argument is the RCU flavor, 386 * RCU callback invocation. The first argument is the RCU flavor,
380 * the second is the total number of callbacks (including those that 387 * the second is the number of lazy callbacks queued, the third is
381 * are not yet ready to be invoked), and the third argument is the 388 * the total number of callbacks queued, and the fourth argument is
382 * current RCU-callback batch limit. 389 * the current RCU-callback batch limit.
383 */ 390 */
384TRACE_EVENT(rcu_batch_start, 391TRACE_EVENT(rcu_batch_start,
385 392
386 TP_PROTO(char *rcuname, long qlen, int blimit), 393 TP_PROTO(char *rcuname, long qlen_lazy, long qlen, int blimit),
387 394
388 TP_ARGS(rcuname, qlen, blimit), 395 TP_ARGS(rcuname, qlen_lazy, qlen, blimit),
389 396
390 TP_STRUCT__entry( 397 TP_STRUCT__entry(
391 __field(char *, rcuname) 398 __field(char *, rcuname)
399 __field(long, qlen_lazy)
392 __field(long, qlen) 400 __field(long, qlen)
393 __field(int, blimit) 401 __field(int, blimit)
394 ), 402 ),
395 403
396 TP_fast_assign( 404 TP_fast_assign(
397 __entry->rcuname = rcuname; 405 __entry->rcuname = rcuname;
406 __entry->qlen_lazy = qlen_lazy;
398 __entry->qlen = qlen; 407 __entry->qlen = qlen;
399 __entry->blimit = blimit; 408 __entry->blimit = blimit;
400 ), 409 ),
401 410
402 TP_printk("%s CBs=%ld bl=%d", 411 TP_printk("%s CBs=%ld/%ld bl=%d",
403 __entry->rcuname, __entry->qlen, __entry->blimit) 412 __entry->rcuname, __entry->qlen_lazy, __entry->qlen,
413 __entry->blimit)
404); 414);
405 415
406/* 416/*
@@ -531,16 +541,21 @@ TRACE_EVENT(rcu_torture_read,
531#else /* #ifdef CONFIG_RCU_TRACE */ 541#else /* #ifdef CONFIG_RCU_TRACE */
532 542
533#define trace_rcu_grace_period(rcuname, gpnum, gpevent) do { } while (0) 543#define trace_rcu_grace_period(rcuname, gpnum, gpevent) do { } while (0)
534#define trace_rcu_grace_period_init(rcuname, gpnum, level, grplo, grphi, qsmask) do { } while (0) 544#define trace_rcu_grace_period_init(rcuname, gpnum, level, grplo, grphi, \
545 qsmask) do { } while (0)
535#define trace_rcu_preempt_task(rcuname, pid, gpnum) do { } while (0) 546#define trace_rcu_preempt_task(rcuname, pid, gpnum) do { } while (0)
536#define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0) 547#define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0)
537#define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, grplo, grphi, gp_tasks) do { } while (0) 548#define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, \
549 grplo, grphi, gp_tasks) do { } \
550 while (0)
538#define trace_rcu_fqs(rcuname, gpnum, cpu, qsevent) do { } while (0) 551#define trace_rcu_fqs(rcuname, gpnum, cpu, qsevent) do { } while (0)
539#define trace_rcu_dyntick(polarity, oldnesting, newnesting) do { } while (0) 552#define trace_rcu_dyntick(polarity, oldnesting, newnesting) do { } while (0)
540#define trace_rcu_prep_idle(reason) do { } while (0) 553#define trace_rcu_prep_idle(reason) do { } while (0)
541#define trace_rcu_callback(rcuname, rhp, qlen) do { } while (0) 554#define trace_rcu_callback(rcuname, rhp, qlen_lazy, qlen) do { } while (0)
542#define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen) do { } while (0) 555#define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen_lazy, qlen) \
543#define trace_rcu_batch_start(rcuname, qlen, blimit) do { } while (0) 556 do { } while (0)
557#define trace_rcu_batch_start(rcuname, qlen_lazy, qlen, blimit) \
558 do { } while (0)
544#define trace_rcu_invoke_callback(rcuname, rhp) do { } while (0) 559#define trace_rcu_invoke_callback(rcuname, rhp) do { } while (0)
545#define trace_rcu_invoke_kfree_callback(rcuname, rhp, offset) do { } while (0) 560#define trace_rcu_invoke_kfree_callback(rcuname, rhp, offset) do { } while (0)
546#define trace_rcu_batch_end(rcuname, callbacks_invoked, cb, nr, iit, risk) \ 561#define trace_rcu_batch_end(rcuname, callbacks_invoked, cb, nr, iit, risk) \
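
With the new qlen_lazy field, the callback-registration and batch-start
tracepoints report the lazy count ahead of the total in lazy/total form.
Given the TP_printk() formats above, an rcu_batch_start event would render
roughly as follows (the numbers are illustrative only):

        rcu_batch_start: rcu_sched CBs=5/42 bl=10

that is, 5 of the 42 queued callbacks are lazy (kfree_rcu()-style) callbacks
that do nothing but free memory, and the batch limit is 10.
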
diff --git a/init/Kconfig b/init/Kconfig
index 3f42cd66f0f8..72f33faca44f 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -438,15 +438,6 @@ config PREEMPT_RCU
438 This option enables preemptible-RCU code that is common between 438 This option enables preemptible-RCU code that is common between
439 the TREE_PREEMPT_RCU and TINY_PREEMPT_RCU implementations. 439 the TREE_PREEMPT_RCU and TINY_PREEMPT_RCU implementations.
440 440
441config RCU_TRACE
442 bool "Enable tracing for RCU"
443 help
444 This option provides tracing in RCU which presents stats
445 in debugfs for debugging RCU implementation.
446
447 Say Y here if you want to enable RCU tracing
448 Say N if you are unsure.
449
450config RCU_FANOUT 441config RCU_FANOUT
451 int "Tree-based hierarchical RCU fanout value" 442 int "Tree-based hierarchical RCU fanout value"
452 range 2 64 if 64BIT 443 range 2 64 if 64BIT
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 8889f7dd7c46..ea9ee4518c35 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -4176,7 +4176,13 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
4176 printk("-------------------------------\n"); 4176 printk("-------------------------------\n");
4177 printk("%s:%d %s!\n", file, line, s); 4177 printk("%s:%d %s!\n", file, line, s);
4178 printk("\nother info that might help us debug this:\n\n"); 4178 printk("\nother info that might help us debug this:\n\n");
4179 printk("\nrcu_scheduler_active = %d, debug_locks = %d\n", rcu_scheduler_active, debug_locks); 4179 printk("\n%srcu_scheduler_active = %d, debug_locks = %d\n",
4180 !rcu_lockdep_current_cpu_online()
4181 ? "RCU used illegally from offline CPU!\n"
4182 : rcu_is_cpu_idle()
4183 ? "RCU used illegally from idle CPU!\n"
4184 : "",
4185 rcu_scheduler_active, debug_locks);
4180 4186
4181 /* 4187 /*
4182 * If a CPU is in the RCU-free window in idle (ie: in the section 4188 * If a CPU is in the RCU-free window in idle (ie: in the section
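
The nested conditional above prefixes the usual lockdep-RCU splat with the
most likely cause, the offline case taking precedence over the idle case.
For RCU use from the idle loop, the printk() format shown produces output
along these lines (counter values illustrative):

        RCU used illegally from idle CPU!
        rcu_scheduler_active = 1, debug_locks = 1
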
diff --git a/kernel/rcu.h b/kernel/rcu.h
index aa88baab5f78..8ba99cdc6515 100644
--- a/kernel/rcu.h
+++ b/kernel/rcu.h
@@ -33,8 +33,27 @@
33 * Process-level increment to ->dynticks_nesting field. This allows for 33 * Process-level increment to ->dynticks_nesting field. This allows for
34 * architectures that use half-interrupts and half-exceptions from 34 * architectures that use half-interrupts and half-exceptions from
35 * process context. 35 * process context.
36 *
37 * DYNTICK_TASK_NEST_MASK defines a field of width DYNTICK_TASK_NEST_WIDTH
38 * that counts the number of process-based reasons why RCU cannot
39 * consider the corresponding CPU to be idle, and DYNTICK_TASK_NEST_VALUE
40 * is the value used to increment or decrement this field.
41 *
42 * The rest of the bits could in principle be used to count interrupts,
43 * but this would mean that a negative-one value in the interrupt
44 * field could incorrectly zero out the DYNTICK_TASK_NEST_MASK field.
45 * We therefore provide a two-bit guard field defined by DYNTICK_TASK_MASK
46 * that is set to DYNTICK_TASK_FLAG upon initial exit from idle.
47 * The DYNTICK_TASK_EXIT_IDLE value is thus the combined value used upon
48 * initial exit from idle.
36 */ 49 */
37#define DYNTICK_TASK_NESTING (LLONG_MAX / 2 - 1) 50#define DYNTICK_TASK_NEST_WIDTH 7
51#define DYNTICK_TASK_NEST_VALUE ((LLONG_MAX >> DYNTICK_TASK_NEST_WIDTH) + 1)
52#define DYNTICK_TASK_NEST_MASK (LLONG_MAX - DYNTICK_TASK_NEST_VALUE + 1)
53#define DYNTICK_TASK_FLAG ((DYNTICK_TASK_NEST_VALUE / 8) * 2)
54#define DYNTICK_TASK_MASK ((DYNTICK_TASK_NEST_VALUE / 8) * 3)
55#define DYNTICK_TASK_EXIT_IDLE (DYNTICK_TASK_NEST_VALUE + \
56 DYNTICK_TASK_FLAG)
38 57
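
For reference, given that LLONG_MAX is 2^63 - 1, these definitions work out
as follows (a worked sketch of the arithmetic, not code from the patch):

        DYNTICK_TASK_NEST_VALUE = (LLONG_MAX >> 7) + 1 = 2^56
        DYNTICK_TASK_NEST_MASK  = bits 56..62   (7-bit task-nesting count)
        DYNTICK_TASK_FLAG       = 2^54          (guard flag)
        DYNTICK_TASK_MASK       = bits 53..54   (2-bit guard field)
        DYNTICK_TASK_EXIT_IDLE  = 2^56 + 2^54   (nest count of 1 plus the flag)

The low 53 bits are left for the interrupt nesting count, and a negative-one
interrupt count can at worst borrow from the guard field, never from the
task-nesting count itself.
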
39/* 58/*
40 * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally 59 * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
@@ -50,7 +69,6 @@ extern struct debug_obj_descr rcuhead_debug_descr;
50 69
51static inline void debug_rcu_head_queue(struct rcu_head *head) 70static inline void debug_rcu_head_queue(struct rcu_head *head)
52{ 71{
53 WARN_ON_ONCE((unsigned long)head & 0x3);
54 debug_object_activate(head, &rcuhead_debug_descr); 72 debug_object_activate(head, &rcuhead_debug_descr);
55 debug_object_active_state(head, &rcuhead_debug_descr, 73 debug_object_active_state(head, &rcuhead_debug_descr,
56 STATE_RCU_HEAD_READY, 74 STATE_RCU_HEAD_READY,
@@ -76,16 +94,18 @@ static inline void debug_rcu_head_unqueue(struct rcu_head *head)
76 94
77extern void kfree(const void *); 95extern void kfree(const void *);
78 96
79static inline void __rcu_reclaim(char *rn, struct rcu_head *head) 97static inline bool __rcu_reclaim(char *rn, struct rcu_head *head)
80{ 98{
81 unsigned long offset = (unsigned long)head->func; 99 unsigned long offset = (unsigned long)head->func;
82 100
83 if (__is_kfree_rcu_offset(offset)) { 101 if (__is_kfree_rcu_offset(offset)) {
84 RCU_TRACE(trace_rcu_invoke_kfree_callback(rn, head, offset)); 102 RCU_TRACE(trace_rcu_invoke_kfree_callback(rn, head, offset));
85 kfree((void *)head - offset); 103 kfree((void *)head - offset);
104 return 1;
86 } else { 105 } else {
87 RCU_TRACE(trace_rcu_invoke_callback(rn, head)); 106 RCU_TRACE(trace_rcu_invoke_callback(rn, head));
88 head->func(head); 107 head->func(head);
108 return 0;
89 } 109 }
90} 110}
91 111
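
Because __rcu_reclaim() now reports whether the callback it just invoked was
a lazy (kfree-only) one, callers can keep a separate lazy count in step. A
sketch of a consuming loop; the function and the two counters are
hypothetical stand-ins for the real callback-invocation code:

        static void example_invoke_cbs(char *rn, struct rcu_head *list,
                                       long *qlen_lazy, long *qlen)
        {
                struct rcu_head *next;

                while (list) {
                        next = list->next;  /* fetch before the callback frees it */
                        *qlen_lazy -= __rcu_reclaim(rn, list); /* 1 iff kfree_rcu() CB */
                        (*qlen)--;
                        list = next;
                }
        }
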
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 2bc4e135ff23..a86f1741cc27 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -88,6 +88,9 @@ EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
88 * section. 88 * section.
89 * 89 *
90 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot. 90 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
91 *
92 * Note that rcu_read_lock_bh() is disallowed if the CPU is either idle
93 * or offline from an RCU perspective, so check for those as well.
91 */ 94 */
92int rcu_read_lock_bh_held(void) 95int rcu_read_lock_bh_held(void)
93{ 96{
@@ -95,6 +98,8 @@ int rcu_read_lock_bh_held(void)
95 return 1; 98 return 1;
96 if (rcu_is_cpu_idle()) 99 if (rcu_is_cpu_idle())
97 return 0; 100 return 0;
101 if (!rcu_lockdep_current_cpu_online())
102 return 0;
98 return in_softirq() || irqs_disabled(); 103 return in_softirq() || irqs_disabled();
99} 104}
100EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held); 105EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
index 977296dca0a4..37a5444204d2 100644
--- a/kernel/rcutiny.c
+++ b/kernel/rcutiny.c
@@ -53,7 +53,7 @@ static void __call_rcu(struct rcu_head *head,
53 53
54#include "rcutiny_plugin.h" 54#include "rcutiny_plugin.h"
55 55
56static long long rcu_dynticks_nesting = DYNTICK_TASK_NESTING; 56static long long rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
57 57
58/* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcutree.c. */ 58/* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcutree.c. */
59static void rcu_idle_enter_common(long long oldval) 59static void rcu_idle_enter_common(long long oldval)
@@ -88,10 +88,16 @@ void rcu_idle_enter(void)
88 88
89 local_irq_save(flags); 89 local_irq_save(flags);
90 oldval = rcu_dynticks_nesting; 90 oldval = rcu_dynticks_nesting;
91 rcu_dynticks_nesting = 0; 91 WARN_ON_ONCE((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) == 0);
92 if ((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) ==
93 DYNTICK_TASK_NEST_VALUE)
94 rcu_dynticks_nesting = 0;
95 else
96 rcu_dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
92 rcu_idle_enter_common(oldval); 97 rcu_idle_enter_common(oldval);
93 local_irq_restore(flags); 98 local_irq_restore(flags);
94} 99}
100EXPORT_SYMBOL_GPL(rcu_idle_enter);
95 101
96/* 102/*
97 * Exit an interrupt handler towards idle. 103 * Exit an interrupt handler towards idle.
@@ -140,11 +146,15 @@ void rcu_idle_exit(void)
140 146
141 local_irq_save(flags); 147 local_irq_save(flags);
142 oldval = rcu_dynticks_nesting; 148 oldval = rcu_dynticks_nesting;
143 WARN_ON_ONCE(oldval != 0); 149 WARN_ON_ONCE(rcu_dynticks_nesting < 0);
144 rcu_dynticks_nesting = DYNTICK_TASK_NESTING; 150 if (rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK)
151 rcu_dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
152 else
153 rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
145 rcu_idle_exit_common(oldval); 154 rcu_idle_exit_common(oldval);
146 local_irq_restore(flags); 155 local_irq_restore(flags);
147} 156}
157EXPORT_SYMBOL_GPL(rcu_idle_exit);
148 158
149/* 159/*
150 * Enter an interrupt handler, moving away from idle. 160 * Enter an interrupt handler, moving away from idle.
@@ -258,7 +268,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
258 268
259 /* If no RCU callbacks ready to invoke, just return. */ 269 /* If no RCU callbacks ready to invoke, just return. */
260 if (&rcp->rcucblist == rcp->donetail) { 270 if (&rcp->rcucblist == rcp->donetail) {
261 RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, -1)); 271 RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, 0, -1));
262 RCU_TRACE(trace_rcu_batch_end(rcp->name, 0, 272 RCU_TRACE(trace_rcu_batch_end(rcp->name, 0,
263 ACCESS_ONCE(rcp->rcucblist), 273 ACCESS_ONCE(rcp->rcucblist),
264 need_resched(), 274 need_resched(),
@@ -269,7 +279,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
269 279
270 /* Move the ready-to-invoke callbacks to a local list. */ 280 /* Move the ready-to-invoke callbacks to a local list. */
271 local_irq_save(flags); 281 local_irq_save(flags);
272 RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, -1)); 282 RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, rcp->qlen, -1));
273 list = rcp->rcucblist; 283 list = rcp->rcucblist;
274 rcp->rcucblist = *rcp->donetail; 284 rcp->rcucblist = *rcp->donetail;
275 *rcp->donetail = NULL; 285 *rcp->donetail = NULL;
@@ -319,6 +329,10 @@ static void rcu_process_callbacks(struct softirq_action *unused)
319 */ 329 */
320void synchronize_sched(void) 330void synchronize_sched(void)
321{ 331{
332 rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
333 !lock_is_held(&rcu_lock_map) &&
334 !lock_is_held(&rcu_sched_lock_map),
335 "Illegal synchronize_sched() in RCU read-side critical section");
322 cond_resched(); 336 cond_resched();
323} 337}
324EXPORT_SYMBOL_GPL(synchronize_sched); 338EXPORT_SYMBOL_GPL(synchronize_sched);
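
The new assertion converts a subtle hang into an immediate lockdep splat.
The canonical error it catches is waiting for a grace period from within a
read-side critical section, which deadlocks outright on builds where
rcu_read_lock() maps onto disabled preemption (hypothetical caller):

        static void example_broken_updater(void)
        {
                rcu_read_lock();
                /* ...read-side accesses... */
                synchronize_sched();    /* splats: "Illegal synchronize_sched()
                                         * in RCU read-side critical section" */
                rcu_read_unlock();
        }
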
diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
index 9cb1ae4aabdd..22ecea0dfb62 100644
--- a/kernel/rcutiny_plugin.h
+++ b/kernel/rcutiny_plugin.h
@@ -132,6 +132,7 @@ static struct rcu_preempt_ctrlblk rcu_preempt_ctrlblk = {
132 RCU_TRACE(.rcb.name = "rcu_preempt") 132 RCU_TRACE(.rcb.name = "rcu_preempt")
133}; 133};
134 134
135static void rcu_read_unlock_special(struct task_struct *t);
135static int rcu_preempted_readers_exp(void); 136static int rcu_preempted_readers_exp(void);
136static void rcu_report_exp_done(void); 137static void rcu_report_exp_done(void);
137 138
@@ -146,6 +147,11 @@ static int rcu_cpu_blocking_cur_gp(void)
146/* 147/*
147 * Check for a running RCU reader. Because there is only one CPU, 148 * Check for a running RCU reader. Because there is only one CPU,
148 * there can be but one running RCU reader at a time. ;-) 149 * there can be but one running RCU reader at a time. ;-)
150 *
151 * Returns zero if there are no running readers. Returns a positive
152 * number if there is at least one reader within its RCU read-side
153 * critical section. Returns a negative number if an outermost reader
154 * is in the midst of exiting from its RCU read-side critical section.
149 */ 155 */
150static int rcu_preempt_running_reader(void) 161static int rcu_preempt_running_reader(void)
151{ 162{
@@ -307,7 +318,6 @@ static int rcu_boost(void)
307 t = container_of(tb, struct task_struct, rcu_node_entry); 318 t = container_of(tb, struct task_struct, rcu_node_entry);
308 rt_mutex_init_proxy_locked(&mtx, t); 319 rt_mutex_init_proxy_locked(&mtx, t);
309 t->rcu_boost_mutex = &mtx; 320 t->rcu_boost_mutex = &mtx;
310 t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BOOSTED;
311 raw_local_irq_restore(flags); 321 raw_local_irq_restore(flags);
312 rt_mutex_lock(&mtx); 322 rt_mutex_lock(&mtx);
313 rt_mutex_unlock(&mtx); /* Keep lockdep happy. */ 323 rt_mutex_unlock(&mtx); /* Keep lockdep happy. */
@@ -475,7 +485,7 @@ void rcu_preempt_note_context_switch(void)
475 unsigned long flags; 485 unsigned long flags;
476 486
477 local_irq_save(flags); /* must exclude scheduler_tick(). */ 487 local_irq_save(flags); /* must exclude scheduler_tick(). */
478 if (rcu_preempt_running_reader() && 488 if (rcu_preempt_running_reader() > 0 &&
479 (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) { 489 (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
480 490
481 /* Possibly blocking in an RCU read-side critical section. */ 491 /* Possibly blocking in an RCU read-side critical section. */
@@ -494,6 +504,13 @@ void rcu_preempt_note_context_switch(void)
494 list_add(&t->rcu_node_entry, &rcu_preempt_ctrlblk.blkd_tasks); 504 list_add(&t->rcu_node_entry, &rcu_preempt_ctrlblk.blkd_tasks);
495 if (rcu_cpu_blocking_cur_gp()) 505 if (rcu_cpu_blocking_cur_gp())
496 rcu_preempt_ctrlblk.gp_tasks = &t->rcu_node_entry; 506 rcu_preempt_ctrlblk.gp_tasks = &t->rcu_node_entry;
507 } else if (rcu_preempt_running_reader() < 0 &&
508 t->rcu_read_unlock_special) {
509 /*
510 * Complete exit from RCU read-side critical section on
511 * behalf of preempted instance of __rcu_read_unlock().
512 */
513 rcu_read_unlock_special(t);
497 } 514 }
498 515
499 /* 516 /*
@@ -526,12 +543,15 @@ EXPORT_SYMBOL_GPL(__rcu_read_lock);
526 * notify RCU core processing or task having blocked during the RCU 543 * notify RCU core processing or task having blocked during the RCU
527 * read-side critical section. 544 * read-side critical section.
528 */ 545 */
529static void rcu_read_unlock_special(struct task_struct *t) 546static noinline void rcu_read_unlock_special(struct task_struct *t)
530{ 547{
531 int empty; 548 int empty;
532 int empty_exp; 549 int empty_exp;
533 unsigned long flags; 550 unsigned long flags;
534 struct list_head *np; 551 struct list_head *np;
552#ifdef CONFIG_RCU_BOOST
553 struct rt_mutex *rbmp = NULL;
554#endif /* #ifdef CONFIG_RCU_BOOST */
535 int special; 555 int special;
536 556
537 /* 557 /*
@@ -552,7 +572,7 @@ static void rcu_read_unlock_special(struct task_struct *t)
552 rcu_preempt_cpu_qs(); 572 rcu_preempt_cpu_qs();
553 573
554 /* Hardware IRQ handlers cannot block. */ 574 /* Hardware IRQ handlers cannot block. */
555 if (in_irq()) { 575 if (in_irq() || in_serving_softirq()) {
556 local_irq_restore(flags); 576 local_irq_restore(flags);
557 return; 577 return;
558 } 578 }
@@ -597,10 +617,10 @@ static void rcu_read_unlock_special(struct task_struct *t)
597 } 617 }
598#ifdef CONFIG_RCU_BOOST 618#ifdef CONFIG_RCU_BOOST
599 /* Unboost self if was boosted. */ 619 /* Unboost self if was boosted. */
600 if (special & RCU_READ_UNLOCK_BOOSTED) { 620 if (t->rcu_boost_mutex != NULL) {
601 t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BOOSTED; 621 rbmp = t->rcu_boost_mutex;
602 rt_mutex_unlock(t->rcu_boost_mutex);
603 t->rcu_boost_mutex = NULL; 622 t->rcu_boost_mutex = NULL;
623 rt_mutex_unlock(rbmp);
604 } 624 }
605#endif /* #ifdef CONFIG_RCU_BOOST */ 625#endif /* #ifdef CONFIG_RCU_BOOST */
606 local_irq_restore(flags); 626 local_irq_restore(flags);
@@ -618,13 +638,22 @@ void __rcu_read_unlock(void)
618 struct task_struct *t = current; 638 struct task_struct *t = current;
619 639
620 barrier(); /* needed if we ever invoke rcu_read_unlock in rcutiny.c */ 640 barrier(); /* needed if we ever invoke rcu_read_unlock in rcutiny.c */
621 --t->rcu_read_lock_nesting; 641 if (t->rcu_read_lock_nesting != 1)
622 barrier(); /* decrement before load of ->rcu_read_unlock_special */ 642 --t->rcu_read_lock_nesting;
623 if (t->rcu_read_lock_nesting == 0 && 643 else {
624 unlikely(ACCESS_ONCE(t->rcu_read_unlock_special))) 644 t->rcu_read_lock_nesting = INT_MIN;
625 rcu_read_unlock_special(t); 645 barrier(); /* assign before ->rcu_read_unlock_special load */
646 if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
647 rcu_read_unlock_special(t);
648 barrier(); /* ->rcu_read_unlock_special load before assign */
649 t->rcu_read_lock_nesting = 0;
650 }
626#ifdef CONFIG_PROVE_LOCKING 651#ifdef CONFIG_PROVE_LOCKING
627 WARN_ON_ONCE(t->rcu_read_lock_nesting < 0); 652 {
653 int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);
654
655 WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
656 }
628#endif /* #ifdef CONFIG_PROVE_LOCKING */ 657#endif /* #ifdef CONFIG_PROVE_LOCKING */
629} 658}
630EXPORT_SYMBOL_GPL(__rcu_read_unlock); 659EXPORT_SYMBOL_GPL(__rcu_read_unlock);
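
The INT_MIN dance above deserves a gloss: the nesting count now doubles as a
three-state marker. Restating the outermost-unlock path as a timeline (this
is a paraphrase of the code above, not new code):

        t->rcu_read_lock_nesting = INT_MIN;     /* outermost exit in progress */
        barrier();                              /* marker visible before the load */
        if (ACCESS_ONCE(t->rcu_read_unlock_special))
                rcu_read_unlock_special(t);     /* may itself be preempted... */
        barrier();
        t->rcu_read_lock_nesting = 0;           /* ...exit complete */

A context switch landing in that window sees a negative nesting count along
with nonzero ->rcu_read_unlock_special and completes the exit on the task's
behalf, which is precisely the new branch in rcu_preempt_note_context_switch()
and the reason rcu_preempt_running_reader() now distinguishes positive from
negative return values.
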
@@ -649,7 +678,7 @@ static void rcu_preempt_check_callbacks(void)
649 invoke_rcu_callbacks(); 678 invoke_rcu_callbacks();
650 if (rcu_preempt_gp_in_progress() && 679 if (rcu_preempt_gp_in_progress() &&
651 rcu_cpu_blocking_cur_gp() && 680 rcu_cpu_blocking_cur_gp() &&
652 rcu_preempt_running_reader()) 681 rcu_preempt_running_reader() > 0)
653 t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS; 682 t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
654} 683}
655 684
@@ -706,6 +735,11 @@ EXPORT_SYMBOL_GPL(call_rcu);
706 */ 735 */
707void synchronize_rcu(void) 736void synchronize_rcu(void)
708{ 737{
738 rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
739 !lock_is_held(&rcu_lock_map) &&
740 !lock_is_held(&rcu_sched_lock_map),
741 "Illegal synchronize_rcu() in RCU read-side critical section");
742
709#ifdef CONFIG_DEBUG_LOCK_ALLOC 743#ifdef CONFIG_DEBUG_LOCK_ALLOC
710 if (!rcu_scheduler_active) 744 if (!rcu_scheduler_active)
711 return; 745 return;
@@ -882,7 +916,8 @@ static void rcu_preempt_process_callbacks(void)
882static void invoke_rcu_callbacks(void) 916static void invoke_rcu_callbacks(void)
883{ 917{
884 have_rcu_kthread_work = 1; 918 have_rcu_kthread_work = 1;
885 wake_up(&rcu_kthread_wq); 919 if (rcu_kthread_task != NULL)
920 wake_up(&rcu_kthread_wq);
886} 921}
887 922
888#ifdef CONFIG_RCU_TRACE 923#ifdef CONFIG_RCU_TRACE
@@ -943,12 +978,16 @@ early_initcall(rcu_spawn_kthreads);
943 978
944#else /* #ifdef CONFIG_RCU_BOOST */ 979#else /* #ifdef CONFIG_RCU_BOOST */
945 980
981/* Hold off callback invocation until early_initcall() time. */
982static int rcu_scheduler_fully_active __read_mostly;
983
946/* 984/*
947 * Start up softirq processing of callbacks. 985 * Start up softirq processing of callbacks.
948 */ 986 */
949void invoke_rcu_callbacks(void) 987void invoke_rcu_callbacks(void)
950{ 988{
951 raise_softirq(RCU_SOFTIRQ); 989 if (rcu_scheduler_fully_active)
990 raise_softirq(RCU_SOFTIRQ);
952} 991}
953 992
954#ifdef CONFIG_RCU_TRACE 993#ifdef CONFIG_RCU_TRACE
@@ -963,10 +1002,14 @@ static bool rcu_is_callbacks_kthread(void)
963 1002
964#endif /* #ifdef CONFIG_RCU_TRACE */ 1003#endif /* #ifdef CONFIG_RCU_TRACE */
965 1004
966void rcu_init(void) 1005static int __init rcu_scheduler_really_started(void)
967{ 1006{
1007 rcu_scheduler_fully_active = 1;
968 open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); 1008 open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
1009 raise_softirq(RCU_SOFTIRQ); /* Invoke any callbacks from early boot. */
1010 return 0;
969} 1011}
1012early_initcall(rcu_scheduler_really_started);
970 1013
971#endif /* #else #ifdef CONFIG_RCU_BOOST */ 1014#endif /* #else #ifdef CONFIG_RCU_BOOST */
972 1015
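
The boot-time fix above follows a general shape: work requested before the
softirq machinery is usable is parked behind a flag and kicked exactly once
when the machinery comes up. A generic sketch of the pattern, all names
hypothetical:

        static bool facility_ready;             /* set once kicking is safe */
        static bool work_requested;

        static void kick_worker(void)           /* raise_softirq() stand-in */
        {
        }

        void request_work(void)                 /* may run arbitrarily early */
        {
                work_requested = true;
                if (facility_ready)
                        kick_worker();
        }

        static int __init facility_up(void)
        {
                facility_ready = true;
                if (work_requested)
                        kick_worker();          /* catch up on early-boot requests */
                return 0;
        }
        early_initcall(facility_up);
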
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index a58ac285fc69..a89b381a8c6e 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -65,7 +65,10 @@ static int fqs_duration; /* Duration of bursts (us), 0 to disable. */
65static int fqs_holdoff; /* Hold time within burst (us). */ 65static int fqs_holdoff; /* Hold time within burst (us). */
66static int fqs_stutter = 3; /* Wait time between bursts (s). */ 66static int fqs_stutter = 3; /* Wait time between bursts (s). */
67static int onoff_interval; /* Wait time between CPU hotplugs, 0=disable. */ 67static int onoff_interval; /* Wait time between CPU hotplugs, 0=disable. */
68static int onoff_holdoff; /* Seconds after boot before CPU hotplugs. */
68static int shutdown_secs; /* Shutdown time (s). <=0 for no shutdown. */ 69static int shutdown_secs; /* Shutdown time (s). <=0 for no shutdown. */
70static int stall_cpu; /* CPU-stall duration (s). 0 for no stall. */
71static int stall_cpu_holdoff = 10; /* Time to wait until stall (s). */
69static int test_boost = 1; /* Test RCU prio boost: 0=no, 1=maybe, 2=yes. */ 72static int test_boost = 1; /* Test RCU prio boost: 0=no, 1=maybe, 2=yes. */
70static int test_boost_interval = 7; /* Interval between boost tests, seconds. */ 73static int test_boost_interval = 7; /* Interval between boost tests, seconds. */
71static int test_boost_duration = 4; /* Duration of each boost test, seconds. */ 74static int test_boost_duration = 4; /* Duration of each boost test, seconds. */
@@ -95,8 +98,14 @@ module_param(fqs_stutter, int, 0444);
95MODULE_PARM_DESC(fqs_stutter, "Wait time between fqs bursts (s)"); 98MODULE_PARM_DESC(fqs_stutter, "Wait time between fqs bursts (s)");
96module_param(onoff_interval, int, 0444); 99module_param(onoff_interval, int, 0444);
97MODULE_PARM_DESC(onoff_interval, "Time between CPU hotplugs (s), 0=disable"); 100MODULE_PARM_DESC(onoff_interval, "Time between CPU hotplugs (s), 0=disable");
101module_param(onoff_holdoff, int, 0444);
102MODULE_PARM_DESC(onoff_holdoff, "Time after boot before CPU hotplugs (s)");
98module_param(shutdown_secs, int, 0444); 103module_param(shutdown_secs, int, 0444);
99MODULE_PARM_DESC(shutdown_secs, "Shutdown time (s), zero to disable."); 104MODULE_PARM_DESC(shutdown_secs, "Shutdown time (s), zero to disable.");
105module_param(stall_cpu, int, 0444);
106MODULE_PARM_DESC(stall_cpu, "Stall duration (s), zero to disable.");
107module_param(stall_cpu_holdoff, int, 0444);
108MODULE_PARM_DESC(stall_cpu_holdoff, "Time to wait before starting stall (s).");
100module_param(test_boost, int, 0444); 109module_param(test_boost, int, 0444);
101MODULE_PARM_DESC(test_boost, "Test RCU prio boost: 0=no, 1=maybe, 2=yes."); 110MODULE_PARM_DESC(test_boost, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
102module_param(test_boost_interval, int, 0444); 111module_param(test_boost_interval, int, 0444);
@@ -129,6 +138,7 @@ static struct task_struct *shutdown_task;
129#ifdef CONFIG_HOTPLUG_CPU 138#ifdef CONFIG_HOTPLUG_CPU
130static struct task_struct *onoff_task; 139static struct task_struct *onoff_task;
131#endif /* #ifdef CONFIG_HOTPLUG_CPU */ 140#endif /* #ifdef CONFIG_HOTPLUG_CPU */
141static struct task_struct *stall_task;
132 142
133#define RCU_TORTURE_PIPE_LEN 10 143#define RCU_TORTURE_PIPE_LEN 10
134 144
@@ -990,12 +1000,12 @@ static void rcu_torture_timer(unsigned long unused)
990 rcu_read_lock_bh_held() || 1000 rcu_read_lock_bh_held() ||
991 rcu_read_lock_sched_held() || 1001 rcu_read_lock_sched_held() ||
992 srcu_read_lock_held(&srcu_ctl)); 1002 srcu_read_lock_held(&srcu_ctl));
993 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
994 if (p == NULL) { 1003 if (p == NULL) {
995 /* Leave because rcu_torture_writer is not yet underway */ 1004 /* Leave because rcu_torture_writer is not yet underway */
996 cur_ops->readunlock(idx); 1005 cur_ops->readunlock(idx);
997 return; 1006 return;
998 } 1007 }
1008 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
999 if (p->rtort_mbtest == 0) 1009 if (p->rtort_mbtest == 0)
1000 atomic_inc(&n_rcu_torture_mberror); 1010 atomic_inc(&n_rcu_torture_mberror);
1001 spin_lock(&rand_lock); 1011 spin_lock(&rand_lock);
@@ -1053,13 +1063,13 @@ rcu_torture_reader(void *arg)
1053 rcu_read_lock_bh_held() || 1063 rcu_read_lock_bh_held() ||
1054 rcu_read_lock_sched_held() || 1064 rcu_read_lock_sched_held() ||
1055 srcu_read_lock_held(&srcu_ctl)); 1065 srcu_read_lock_held(&srcu_ctl));
1056 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
1057 if (p == NULL) { 1066 if (p == NULL) {
1058 /* Wait for rcu_torture_writer to get underway */ 1067 /* Wait for rcu_torture_writer to get underway */
1059 cur_ops->readunlock(idx); 1068 cur_ops->readunlock(idx);
1060 schedule_timeout_interruptible(HZ); 1069 schedule_timeout_interruptible(HZ);
1061 continue; 1070 continue;
1062 } 1071 }
1072 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
1063 if (p->rtort_mbtest == 0) 1073 if (p->rtort_mbtest == 0)
1064 atomic_inc(&n_rcu_torture_mberror); 1074 atomic_inc(&n_rcu_torture_mberror);
1065 cur_ops->read_delay(&rand); 1075 cur_ops->read_delay(&rand);
@@ -1300,13 +1310,13 @@ rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, char *tag)
1300 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d " 1310 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
1301 "test_boost=%d/%d test_boost_interval=%d " 1311 "test_boost=%d/%d test_boost_interval=%d "
1302 "test_boost_duration=%d shutdown_secs=%d " 1312 "test_boost_duration=%d shutdown_secs=%d "
1303 "onoff_interval=%d\n", 1313 "onoff_interval=%d onoff_holdoff=%d\n",
1304 torture_type, tag, nrealreaders, nfakewriters, 1314 torture_type, tag, nrealreaders, nfakewriters,
1305 stat_interval, verbose, test_no_idle_hz, shuffle_interval, 1315 stat_interval, verbose, test_no_idle_hz, shuffle_interval,
1306 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter, 1316 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
1307 test_boost, cur_ops->can_boost, 1317 test_boost, cur_ops->can_boost,
1308 test_boost_interval, test_boost_duration, shutdown_secs, 1318 test_boost_interval, test_boost_duration, shutdown_secs,
1309 onoff_interval); 1319 onoff_interval, onoff_holdoff);
1310} 1320}
1311 1321
1312static struct notifier_block rcutorture_shutdown_nb = { 1322static struct notifier_block rcutorture_shutdown_nb = {
@@ -1410,6 +1420,11 @@ rcu_torture_onoff(void *arg)
1410 for_each_online_cpu(cpu) 1420 for_each_online_cpu(cpu)
1411 maxcpu = cpu; 1421 maxcpu = cpu;
1412 WARN_ON(maxcpu < 0); 1422 WARN_ON(maxcpu < 0);
1423 if (onoff_holdoff > 0) {
1424 VERBOSE_PRINTK_STRING("rcu_torture_onoff begin holdoff");
1425 schedule_timeout_interruptible(onoff_holdoff * HZ);
1426 VERBOSE_PRINTK_STRING("rcu_torture_onoff end holdoff");
1427 }
1413 while (!kthread_should_stop()) { 1428 while (!kthread_should_stop()) {
1414 cpu = (rcu_random(&rand) >> 4) % (maxcpu + 1); 1429 cpu = (rcu_random(&rand) >> 4) % (maxcpu + 1);
1415 if (cpu_online(cpu) && cpu_is_hotpluggable(cpu)) { 1430 if (cpu_online(cpu) && cpu_is_hotpluggable(cpu)) {
@@ -1450,12 +1465,15 @@ rcu_torture_onoff(void *arg)
1450static int __cpuinit 1465static int __cpuinit
1451rcu_torture_onoff_init(void) 1466rcu_torture_onoff_init(void)
1452{ 1467{
1468 int ret;
1469
1453 if (onoff_interval <= 0) 1470 if (onoff_interval <= 0)
1454 return 0; 1471 return 0;
1455 onoff_task = kthread_run(rcu_torture_onoff, NULL, "rcu_torture_onoff"); 1472 onoff_task = kthread_run(rcu_torture_onoff, NULL, "rcu_torture_onoff");
1456 if (IS_ERR(onoff_task)) { 1473 if (IS_ERR(onoff_task)) {
1474 ret = PTR_ERR(onoff_task);
1457 onoff_task = NULL; 1475 onoff_task = NULL;
1458 return PTR_ERR(onoff_task); 1476 return ret;
1459 } 1477 }
1460 return 0; 1478 return 0;
1461} 1479}
@@ -1481,6 +1499,63 @@ static void rcu_torture_onoff_cleanup(void)
1481 1499
1482#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */ 1500#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
1483 1501
1502/*
1503 * CPU-stall kthread. It waits as specified by stall_cpu_holdoff, then
1504 * induces a CPU stall for the time specified by stall_cpu.
1505 */
1506static int __cpuinit rcu_torture_stall(void *args)
1507{
1508 unsigned long stop_at;
1509
1510 VERBOSE_PRINTK_STRING("rcu_torture_stall task started");
1511 if (stall_cpu_holdoff > 0) {
1512 VERBOSE_PRINTK_STRING("rcu_torture_stall begin holdoff");
1513 schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
1514 VERBOSE_PRINTK_STRING("rcu_torture_stall end holdoff");
1515 }
1516 if (!kthread_should_stop()) {
1517 stop_at = get_seconds() + stall_cpu;
1518 /* RCU CPU stall is expected behavior in following code. */
1519 printk(KERN_ALERT "rcu_torture_stall start.\n");
1520 rcu_read_lock();
1521 preempt_disable();
1522 while (ULONG_CMP_LT(get_seconds(), stop_at))
1523 continue; /* Induce RCU CPU stall warning. */
1524 preempt_enable();
1525 rcu_read_unlock();
1526 printk(KERN_ALERT "rcu_torture_stall end.\n");
1527 }
1528 rcutorture_shutdown_absorb("rcu_torture_stall");
1529 while (!kthread_should_stop())
1530 schedule_timeout_interruptible(10 * HZ);
1531 return 0;
1532}
1533
1534/* Spawn CPU-stall kthread, if stall_cpu specified. */
1535static int __init rcu_torture_stall_init(void)
1536{
1537 int ret;
1538
1539 if (stall_cpu <= 0)
1540 return 0;
1541 stall_task = kthread_run(rcu_torture_stall, NULL, "rcu_torture_stall");
1542 if (IS_ERR(stall_task)) {
1543 ret = PTR_ERR(stall_task);
1544 stall_task = NULL;
1545 return ret;
1546 }
1547 return 0;
1548}
1549
1550/* Clean up after the CPU-stall kthread, if one was spawned. */
1551static void rcu_torture_stall_cleanup(void)
1552{
1553 if (stall_task == NULL)
1554 return;
1555 VERBOSE_PRINTK_STRING("Stopping rcu_torture_stall_task.");
1556 kthread_stop(stall_task);
1557}
1558
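
With these two hooks in place, a stall warning can be provoked on demand,
for example by loading rcutorture with stall_cpu=10 and stall_cpu_holdoff=30
(illustrative values): thirty seconds after load, one CPU spins for ten
seconds with preemption disabled inside an RCU read-side critical section,
which should trip the stall detector being tested.
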
1484static int rcutorture_cpu_notify(struct notifier_block *self, 1559static int rcutorture_cpu_notify(struct notifier_block *self,
1485 unsigned long action, void *hcpu) 1560 unsigned long action, void *hcpu)
1486{ 1561{
@@ -1523,6 +1598,7 @@ rcu_torture_cleanup(void)
1523 fullstop = FULLSTOP_RMMOD; 1598 fullstop = FULLSTOP_RMMOD;
1524 mutex_unlock(&fullstop_mutex); 1599 mutex_unlock(&fullstop_mutex);
1525 unregister_reboot_notifier(&rcutorture_shutdown_nb); 1600 unregister_reboot_notifier(&rcutorture_shutdown_nb);
1601 rcu_torture_stall_cleanup();
1526 if (stutter_task) { 1602 if (stutter_task) {
1527 VERBOSE_PRINTK_STRING("Stopping rcu_torture_stutter task"); 1603 VERBOSE_PRINTK_STRING("Stopping rcu_torture_stutter task");
1528 kthread_stop(stutter_task); 1604 kthread_stop(stutter_task);
@@ -1602,6 +1678,10 @@ rcu_torture_cleanup(void)
1602 cur_ops->cleanup(); 1678 cur_ops->cleanup();
1603 if (atomic_read(&n_rcu_torture_error)) 1679 if (atomic_read(&n_rcu_torture_error))
1604 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE"); 1680 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
1681 else if (n_online_successes != n_online_attempts ||
1682 n_offline_successes != n_offline_attempts)
1683 rcu_torture_print_module_parms(cur_ops,
1684 "End of test: RCU_HOTPLUG");
1605 else 1685 else
1606 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS"); 1686 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
1607} 1687}
@@ -1819,6 +1899,7 @@ rcu_torture_init(void)
1819 } 1899 }
1820 rcu_torture_onoff_init(); 1900 rcu_torture_onoff_init();
1821 register_reboot_notifier(&rcutorture_shutdown_nb); 1901 register_reboot_notifier(&rcutorture_shutdown_nb);
1902 rcu_torture_stall_init();
1822 rcutorture_record_test_transition(); 1903 rcutorture_record_test_transition();
1823 mutex_unlock(&fullstop_mutex); 1904 mutex_unlock(&fullstop_mutex);
1824 return 0; 1905 return 0;
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 6c4a6722abfd..1050d6d3922c 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -50,6 +50,8 @@
50#include <linux/wait.h> 50#include <linux/wait.h>
51#include <linux/kthread.h> 51#include <linux/kthread.h>
52#include <linux/prefetch.h> 52#include <linux/prefetch.h>
53#include <linux/delay.h>
54#include <linux/stop_machine.h>
53 55
54#include "rcutree.h" 56#include "rcutree.h"
55#include <trace/events/rcu.h> 57#include <trace/events/rcu.h>
@@ -196,7 +198,7 @@ void rcu_note_context_switch(int cpu)
196EXPORT_SYMBOL_GPL(rcu_note_context_switch); 198EXPORT_SYMBOL_GPL(rcu_note_context_switch);
197 199
198DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = { 200DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
199 .dynticks_nesting = DYNTICK_TASK_NESTING, 201 .dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
200 .dynticks = ATOMIC_INIT(1), 202 .dynticks = ATOMIC_INIT(1),
201}; 203};
202 204
@@ -208,8 +210,11 @@ module_param(blimit, int, 0);
208module_param(qhimark, int, 0); 210module_param(qhimark, int, 0);
209module_param(qlowmark, int, 0); 211module_param(qlowmark, int, 0);
210 212
211int rcu_cpu_stall_suppress __read_mostly; 213int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
214int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
215
212module_param(rcu_cpu_stall_suppress, int, 0644); 216module_param(rcu_cpu_stall_suppress, int, 0644);
217module_param(rcu_cpu_stall_timeout, int, 0644);
213 218
214static void force_quiescent_state(struct rcu_state *rsp, int relaxed); 219static void force_quiescent_state(struct rcu_state *rsp, int relaxed);
215static int rcu_pending(int cpu); 220static int rcu_pending(int cpu);
@@ -301,8 +306,6 @@ static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
301 return &rsp->node[0]; 306 return &rsp->node[0];
302} 307}
303 308
304#ifdef CONFIG_SMP
305
306/* 309/*
307 * If the specified CPU is offline, tell the caller that it is in 310 * If the specified CPU is offline, tell the caller that it is in
308 * a quiescent state. Otherwise, whack it with a reschedule IPI. 311 * a quiescent state. Otherwise, whack it with a reschedule IPI.
@@ -317,30 +320,21 @@ static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
317static int rcu_implicit_offline_qs(struct rcu_data *rdp) 320static int rcu_implicit_offline_qs(struct rcu_data *rdp)
318{ 321{
319 /* 322 /*
320 * If the CPU is offline, it is in a quiescent state. We can 323 * If the CPU is offline for more than a jiffy, it is in a quiescent
321 * trust its state not to change because interrupts are disabled. 324 * state. We can trust its state not to change because interrupts
325 * are disabled. The reason for the jiffy's worth of slack is to
326 * handle CPUs initializing on the way up and finding their way
327 * to the idle loop on the way down.
322 */ 328 */
323 if (cpu_is_offline(rdp->cpu)) { 329 if (cpu_is_offline(rdp->cpu) &&
330 ULONG_CMP_LT(rdp->rsp->gp_start + 2, jiffies)) {
324 trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, "ofl"); 331 trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, "ofl");
325 rdp->offline_fqs++; 332 rdp->offline_fqs++;
326 return 1; 333 return 1;
327 } 334 }
328
329 /*
330 * The CPU is online, so send it a reschedule IPI. This forces
331 * it through the scheduler, and (inefficiently) also handles cases
332 * where idle loops fail to inform RCU about the CPU being idle.
333 */
334 if (rdp->cpu != smp_processor_id())
335 smp_send_reschedule(rdp->cpu);
336 else
337 set_need_resched();
338 rdp->resched_ipi++;
339 return 0; 335 return 0;
340} 336}
341 337
342#endif /* #ifdef CONFIG_SMP */
343
344/* 338/*
345 * rcu_idle_enter_common - inform RCU that current CPU is moving towards idle 339 * rcu_idle_enter_common - inform RCU that current CPU is moving towards idle
346 * 340 *
@@ -366,6 +360,17 @@ static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
366 atomic_inc(&rdtp->dynticks); 360 atomic_inc(&rdtp->dynticks);
367 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */ 361 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
368 WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1); 362 WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
363
364 /*
365 * The idle task is not permitted to enter the idle loop while
366 * in an RCU read-side critical section.
367 */
368 rcu_lockdep_assert(!lock_is_held(&rcu_lock_map),
369 "Illegal idle entry in RCU read-side critical section.");
370 rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map),
371 "Illegal idle entry in RCU-bh read-side critical section.");
372 rcu_lockdep_assert(!lock_is_held(&rcu_sched_lock_map),
373 "Illegal idle entry in RCU-sched read-side critical section.");
369} 374}
370 375
371/** 376/**
@@ -389,10 +394,15 @@ void rcu_idle_enter(void)
389 local_irq_save(flags); 394 local_irq_save(flags);
390 rdtp = &__get_cpu_var(rcu_dynticks); 395 rdtp = &__get_cpu_var(rcu_dynticks);
391 oldval = rdtp->dynticks_nesting; 396 oldval = rdtp->dynticks_nesting;
392 rdtp->dynticks_nesting = 0; 397 WARN_ON_ONCE((oldval & DYNTICK_TASK_NEST_MASK) == 0);
398 if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE)
399 rdtp->dynticks_nesting = 0;
400 else
401 rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
393 rcu_idle_enter_common(rdtp, oldval); 402 rcu_idle_enter_common(rdtp, oldval);
394 local_irq_restore(flags); 403 local_irq_restore(flags);
395} 404}
405EXPORT_SYMBOL_GPL(rcu_idle_enter);
396 406
397/** 407/**
398 * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle 408 * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
@@ -462,7 +472,7 @@ static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
462 * Exit idle mode, in other words, -enter- the mode in which RCU 472 * Exit idle mode, in other words, -enter- the mode in which RCU
463 * read-side critical sections can occur. 473 * read-side critical sections can occur.
464 * 474 *
465 * We crowbar the ->dynticks_nesting field to DYNTICK_TASK_NESTING to 475 * We crowbar the ->dynticks_nesting field to DYNTICK_TASK_EXIT_IDLE to
466 * allow for the possibility of usermode upcalls messing up our count 476 * allow for the possibility of usermode upcalls messing up our count
467 * of interrupt nesting level during the busy period that is just 477 * of interrupt nesting level during the busy period that is just
468 * now starting. 478 * now starting.
@@ -476,11 +486,15 @@ void rcu_idle_exit(void)
476 local_irq_save(flags); 486 local_irq_save(flags);
477 rdtp = &__get_cpu_var(rcu_dynticks); 487 rdtp = &__get_cpu_var(rcu_dynticks);
478 oldval = rdtp->dynticks_nesting; 488 oldval = rdtp->dynticks_nesting;
479 WARN_ON_ONCE(oldval != 0); 489 WARN_ON_ONCE(oldval < 0);
480 rdtp->dynticks_nesting = DYNTICK_TASK_NESTING; 490 if (oldval & DYNTICK_TASK_NEST_MASK)
491 rdtp->dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
492 else
493 rdtp->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
481 rcu_idle_exit_common(rdtp, oldval); 494 rcu_idle_exit_common(rdtp, oldval);
482 local_irq_restore(flags); 495 local_irq_restore(flags);
483} 496}
497EXPORT_SYMBOL_GPL(rcu_idle_exit);
484 498
485/** 499/**
486 * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle 500 * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
@@ -581,6 +595,49 @@ int rcu_is_cpu_idle(void)
581} 595}
582EXPORT_SYMBOL(rcu_is_cpu_idle); 596EXPORT_SYMBOL(rcu_is_cpu_idle);
583 597
598#ifdef CONFIG_HOTPLUG_CPU
599
600/*
601 * Is the current CPU online? Disable preemption to avoid false positives
602 * that could otherwise happen due to the current CPU number being sampled,
603 * this task being preempted, its old CPU being taken offline, resuming
604 * on some other CPU, then determining that its old CPU is now offline.
605 * It is OK to use RCU on an offline processor during initial boot, hence
606 * the check for rcu_scheduler_fully_active. Note also that it is OK
607 * for a CPU coming online to use RCU for one jiffy prior to marking itself
608 * online in the cpu_online_mask. Similarly, it is OK for a CPU going
609 * offline to continue to use RCU for one jiffy after marking itself
610 * offline in the cpu_online_mask. This leniency is necessary given the
611 * non-atomic nature of the online and offline processing, for example,
612 * the fact that a CPU enters the scheduler after completing the CPU_DYING
613 * notifiers.
614 *
615 * This is also why RCU internally marks CPUs online during the
616 * CPU_UP_PREPARE phase and offline during the CPU_DEAD phase.
617 *
618 * Disable checking if in an NMI handler because we cannot safely report
619 * errors from NMI handlers anyway.
620 */
621bool rcu_lockdep_current_cpu_online(void)
622{
623 struct rcu_data *rdp;
624 struct rcu_node *rnp;
625 bool ret;
626
627 if (in_nmi())
628 return 1;
629 preempt_disable();
630 rdp = &__get_cpu_var(rcu_sched_data);
631 rnp = rdp->mynode;
632 ret = (rdp->grpmask & rnp->qsmaskinit) ||
633 !rcu_scheduler_fully_active;
634 preempt_enable();
635 return ret;
636}
637EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
638
639#endif /* #ifdef CONFIG_HOTPLUG_CPU */
640
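
The preemption-disabled region above closes a narrow race. A sketch of the
false positive it prevents (a timeline, not code from the patch):

        task on CPU 1:  rdp = &per_cpu(rcu_sched_data, 1);  /* samples CPU 1 */
                        /* preempted, migrates to CPU 2 */
        CPU 1 goes offline, clearing its bit in its leaf rcu_node's ->qsmaskinit.
        task resumes:   rdp->grpmask & rnp->qsmaskinit == 0  /* "offline"? */

Without the preempt_disable()/preempt_enable() pair, a legal RCU access from
the (online) CPU 2 could thus be reported as use from an offline CPU.
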
584#endif /* #ifdef CONFIG_PROVE_RCU */ 641#endif /* #ifdef CONFIG_PROVE_RCU */
585 642
586/** 643/**
@@ -595,8 +652,6 @@ int rcu_is_cpu_rrupt_from_idle(void)
595 return __get_cpu_var(rcu_dynticks).dynticks_nesting <= 1; 652 return __get_cpu_var(rcu_dynticks).dynticks_nesting <= 1;
596} 653}
597 654
598#ifdef CONFIG_SMP
599
600/* 655/*
601 * Snapshot the specified CPU's dynticks counter so that we can later 656 * Snapshot the specified CPU's dynticks counter so that we can later
602 * credit them with an implicit quiescent state. Return 1 if this CPU 657 * credit them with an implicit quiescent state. Return 1 if this CPU
@@ -640,12 +695,28 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
640 return rcu_implicit_offline_qs(rdp); 695 return rcu_implicit_offline_qs(rdp);
641} 696}
642 697
643#endif /* #ifdef CONFIG_SMP */ 698static int jiffies_till_stall_check(void)
699{
700 int till_stall_check = ACCESS_ONCE(rcu_cpu_stall_timeout);
701
702 /*
703 * Limit check must be consistent with the Kconfig limits
704 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
705 */
706 if (till_stall_check < 3) {
707 ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
708 till_stall_check = 3;
709 } else if (till_stall_check > 300) {
710 ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
711 till_stall_check = 300;
712 }
713 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
714}
644 715
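
As a worked example of the clamping: writing 500 to the new parameter
(typically via /sys/module/rcutree/parameters/rcu_cpu_stall_timeout) is
pulled back to 300, so stall checking fires 300 * HZ + RCU_STALL_DELAY_DELTA
jiffies into a grace period; writing 0, 1, or 2 is pushed up to the
three-second minimum. The write-back through ACCESS_ONCE() also makes the
clamped value visible to a subsequent read of the parameter.
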
645static void record_gp_stall_check_time(struct rcu_state *rsp) 716static void record_gp_stall_check_time(struct rcu_state *rsp)
646{ 717{
647 rsp->gp_start = jiffies; 718 rsp->gp_start = jiffies;
648 rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_CHECK; 719 rsp->jiffies_stall = jiffies + jiffies_till_stall_check();
649} 720}
650 721
651static void print_other_cpu_stall(struct rcu_state *rsp) 722static void print_other_cpu_stall(struct rcu_state *rsp)
@@ -664,13 +735,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
664 raw_spin_unlock_irqrestore(&rnp->lock, flags); 735 raw_spin_unlock_irqrestore(&rnp->lock, flags);
665 return; 736 return;
666 } 737 }
667 rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK; 738 rsp->jiffies_stall = jiffies + 3 * jiffies_till_stall_check() + 3;
668
669 /*
670 * Now rat on any tasks that got kicked up to the root rcu_node
671 * due to CPU offlining.
672 */
673 ndetected = rcu_print_task_stall(rnp);
674 raw_spin_unlock_irqrestore(&rnp->lock, flags); 739 raw_spin_unlock_irqrestore(&rnp->lock, flags);
675 740
676 /* 741 /*
@@ -678,8 +743,9 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
678 * See Documentation/RCU/stallwarn.txt for info on how to debug 743 * See Documentation/RCU/stallwarn.txt for info on how to debug
679 * RCU CPU stall warnings. 744 * RCU CPU stall warnings.
680 */ 745 */
681 printk(KERN_ERR "INFO: %s detected stalls on CPUs/tasks: {", 746 printk(KERN_ERR "INFO: %s detected stalls on CPUs/tasks:",
682 rsp->name); 747 rsp->name);
748 print_cpu_stall_info_begin();
683 rcu_for_each_leaf_node(rsp, rnp) { 749 rcu_for_each_leaf_node(rsp, rnp) {
684 raw_spin_lock_irqsave(&rnp->lock, flags); 750 raw_spin_lock_irqsave(&rnp->lock, flags);
685 ndetected += rcu_print_task_stall(rnp); 751 ndetected += rcu_print_task_stall(rnp);
@@ -688,11 +754,22 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
688 continue; 754 continue;
689 for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++) 755 for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
690 if (rnp->qsmask & (1UL << cpu)) { 756 if (rnp->qsmask & (1UL << cpu)) {
691 printk(" %d", rnp->grplo + cpu); 757 print_cpu_stall_info(rsp, rnp->grplo + cpu);
692 ndetected++; 758 ndetected++;
693 } 759 }
694 } 760 }
695 printk("} (detected by %d, t=%ld jiffies)\n", 761
762 /*
763 * Now rat on any tasks that got kicked up to the root rcu_node
764 * due to CPU offlining.
765 */
766 rnp = rcu_get_root(rsp);
767 raw_spin_lock_irqsave(&rnp->lock, flags);
768 ndetected = rcu_print_task_stall(rnp);
769 raw_spin_unlock_irqrestore(&rnp->lock, flags);
770
771 print_cpu_stall_info_end();
772 printk(KERN_CONT "(detected by %d, t=%ld jiffies)\n",
696 smp_processor_id(), (long)(jiffies - rsp->gp_start)); 773 smp_processor_id(), (long)(jiffies - rsp->gp_start));
697 if (ndetected == 0) 774 if (ndetected == 0)
698 printk(KERN_ERR "INFO: Stall ended before state dump start\n"); 775 printk(KERN_ERR "INFO: Stall ended before state dump start\n");
@@ -716,15 +793,18 @@ static void print_cpu_stall(struct rcu_state *rsp)
716 * See Documentation/RCU/stallwarn.txt for info on how to debug 793 * See Documentation/RCU/stallwarn.txt for info on how to debug
717 * RCU CPU stall warnings. 794 * RCU CPU stall warnings.
718 */ 795 */
719 printk(KERN_ERR "INFO: %s detected stall on CPU %d (t=%lu jiffies)\n", 796 printk(KERN_ERR "INFO: %s self-detected stall on CPU", rsp->name);
720 rsp->name, smp_processor_id(), jiffies - rsp->gp_start); 797 print_cpu_stall_info_begin();
798 print_cpu_stall_info(rsp, smp_processor_id());
799 print_cpu_stall_info_end();
800 printk(KERN_CONT " (t=%lu jiffies)\n", jiffies - rsp->gp_start);
721 if (!trigger_all_cpu_backtrace()) 801 if (!trigger_all_cpu_backtrace())
722 dump_stack(); 802 dump_stack();
723 803
724 raw_spin_lock_irqsave(&rnp->lock, flags); 804 raw_spin_lock_irqsave(&rnp->lock, flags);
725 if (ULONG_CMP_GE(jiffies, rsp->jiffies_stall)) 805 if (ULONG_CMP_GE(jiffies, rsp->jiffies_stall))
726 rsp->jiffies_stall = 806 rsp->jiffies_stall = jiffies +
727 jiffies + RCU_SECONDS_TILL_STALL_RECHECK; 807 3 * jiffies_till_stall_check() + 3;
728 raw_spin_unlock_irqrestore(&rnp->lock, flags); 808 raw_spin_unlock_irqrestore(&rnp->lock, flags);
729 809
730 set_need_resched(); /* kick ourselves to get things going. */ 810 set_need_resched(); /* kick ourselves to get things going. */
@@ -807,6 +887,7 @@ static void __note_new_gpnum(struct rcu_state *rsp, struct rcu_node *rnp, struct
807 rdp->passed_quiesce = 0; 887 rdp->passed_quiesce = 0;
808 } else 888 } else
809 rdp->qs_pending = 0; 889 rdp->qs_pending = 0;
890 zero_cpu_stall_ticks(rdp);
810 } 891 }
811} 892}
812 893
@@ -943,6 +1024,10 @@ rcu_start_gp_per_cpu(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_dat
943 * in preparation for detecting the next grace period. The caller must hold 1024 * in preparation for detecting the next grace period. The caller must hold
944 * the root node's ->lock, which is released before return. Hard irqs must 1025 * the root node's ->lock, which is released before return. Hard irqs must
945 * be disabled. 1026 * be disabled.
1027 *
1028 * Note that it is legal for a dying CPU (which is marked as offline) to
1029 * invoke this function. This can happen when the dying CPU reports its
1030 * quiescent state.
946 */ 1031 */
947static void 1032static void
948rcu_start_gp(struct rcu_state *rsp, unsigned long flags) 1033rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
@@ -980,26 +1065,8 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
980 rsp->fqs_state = RCU_GP_INIT; /* Hold off force_quiescent_state. */ 1065 rsp->fqs_state = RCU_GP_INIT; /* Hold off force_quiescent_state. */
981 rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS; 1066 rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
982 record_gp_stall_check_time(rsp); 1067 record_gp_stall_check_time(rsp);
983
984 /* Special-case the common single-level case. */
985 if (NUM_RCU_NODES == 1) {
986 rcu_preempt_check_blocked_tasks(rnp);
987 rnp->qsmask = rnp->qsmaskinit;
988 rnp->gpnum = rsp->gpnum;
989 rnp->completed = rsp->completed;
990 rsp->fqs_state = RCU_SIGNAL_INIT; /* force_quiescent_state OK */
991 rcu_start_gp_per_cpu(rsp, rnp, rdp);
992 rcu_preempt_boost_start_gp(rnp);
993 trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
994 rnp->level, rnp->grplo,
995 rnp->grphi, rnp->qsmask);
996 raw_spin_unlock_irqrestore(&rnp->lock, flags);
997 return;
998 }
999
1000 raw_spin_unlock(&rnp->lock); /* leave irqs disabled. */ 1068 raw_spin_unlock(&rnp->lock); /* leave irqs disabled. */
1001 1069
1002
1003 /* Exclude any concurrent CPU-hotplug operations. */ 1070 /* Exclude any concurrent CPU-hotplug operations. */
1004 raw_spin_lock(&rsp->onofflock); /* irqs already disabled. */ 1071 raw_spin_lock(&rsp->onofflock); /* irqs already disabled. */
1005 1072
@@ -1245,53 +1312,115 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
1245 1312
1246/* 1313/*
1247 * Move a dying CPU's RCU callbacks to online CPU's callback list. 1314 * Move a dying CPU's RCU callbacks to online CPU's callback list.
1248 * Synchronization is not required because this function executes 1315 * Also record a quiescent state for this CPU for the current grace period.
1249 * in stop_machine() context. 1316 * Synchronization and interrupt disabling are not required because
1317 * this function executes in stop_machine() context. Therefore, cleanup
1318 * operations that might block must be done later from the CPU_DEAD
1319 * notifier.
1320 *
1321 * Note that the outgoing CPU's bit has already been cleared in the
1322 * cpu_online_mask. This allows us to randomly pick a callback
1323 * destination from the bits set in that mask.
1250 */ 1324 */
1251static void rcu_send_cbs_to_online(struct rcu_state *rsp) 1325static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
1252{ 1326{
1253 int i; 1327 int i;
1254 /* current DYING CPU is cleared in the cpu_online_mask */ 1328 unsigned long mask;
1255 int receive_cpu = cpumask_any(cpu_online_mask); 1329 int receive_cpu = cpumask_any(cpu_online_mask);
1256 struct rcu_data *rdp = this_cpu_ptr(rsp->rda); 1330 struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
1257 struct rcu_data *receive_rdp = per_cpu_ptr(rsp->rda, receive_cpu); 1331 struct rcu_data *receive_rdp = per_cpu_ptr(rsp->rda, receive_cpu);
1332 RCU_TRACE(struct rcu_node *rnp = rdp->mynode); /* For dying CPU. */
1333
1334 /* First, adjust the counts. */
1335 if (rdp->nxtlist != NULL) {
1336 receive_rdp->qlen_lazy += rdp->qlen_lazy;
1337 receive_rdp->qlen += rdp->qlen;
1338 rdp->qlen_lazy = 0;
1339 rdp->qlen = 0;
1340 }
1258 1341
1259 if (rdp->nxtlist == NULL) 1342 /*
1260 return; /* irqs disabled, so comparison is stable. */ 1343 * Next, move ready-to-invoke callbacks to be invoked on some
1344 * other CPU. These will not be required to pass through another
1345 * grace period: They are done, regardless of CPU.
1346 */
1347 if (rdp->nxtlist != NULL &&
1348 rdp->nxttail[RCU_DONE_TAIL] != &rdp->nxtlist) {
1349 struct rcu_head *oldhead;
1350 struct rcu_head **oldtail;
1351 struct rcu_head **newtail;
1352
1353 oldhead = rdp->nxtlist;
1354 oldtail = receive_rdp->nxttail[RCU_DONE_TAIL];
1355 rdp->nxtlist = *rdp->nxttail[RCU_DONE_TAIL];
1356 *rdp->nxttail[RCU_DONE_TAIL] = *oldtail;
1357 *receive_rdp->nxttail[RCU_DONE_TAIL] = oldhead;
1358 newtail = rdp->nxttail[RCU_DONE_TAIL];
1359 for (i = RCU_DONE_TAIL; i < RCU_NEXT_SIZE; i++) {
1360 if (receive_rdp->nxttail[i] == oldtail)
1361 receive_rdp->nxttail[i] = newtail;
1362 if (rdp->nxttail[i] == newtail)
1363 rdp->nxttail[i] = &rdp->nxtlist;
1364 }
1365 }
1261 1366
1262 *receive_rdp->nxttail[RCU_NEXT_TAIL] = rdp->nxtlist; 1367 /*
1263 receive_rdp->nxttail[RCU_NEXT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; 1368 * Finally, put the rest of the callbacks at the end of the list.
1264 receive_rdp->qlen += rdp->qlen; 1369 * The ones that made it partway through get to start over: We
1265 receive_rdp->n_cbs_adopted += rdp->qlen; 1370 * cannot assume that grace periods are synchronized across CPUs.
1266 rdp->n_cbs_orphaned += rdp->qlen; 1371 * (We could splice RCU_WAIT_TAIL into RCU_NEXT_READY_TAIL, but
1372 * this does not seem compelling. Not yet, anyway.)
1373 */
1374 if (rdp->nxtlist != NULL) {
1375 *receive_rdp->nxttail[RCU_NEXT_TAIL] = rdp->nxtlist;
1376 receive_rdp->nxttail[RCU_NEXT_TAIL] =
1377 rdp->nxttail[RCU_NEXT_TAIL];
1378 receive_rdp->n_cbs_adopted += rdp->qlen;
1379 rdp->n_cbs_orphaned += rdp->qlen;
1380
1381 rdp->nxtlist = NULL;
1382 for (i = 0; i < RCU_NEXT_SIZE; i++)
1383 rdp->nxttail[i] = &rdp->nxtlist;
1384 }
1267 1385
1268 rdp->nxtlist = NULL; 1386 /*
1269 for (i = 0; i < RCU_NEXT_SIZE; i++) 1387 * Record a quiescent state for the dying CPU. This is safe
1270 rdp->nxttail[i] = &rdp->nxtlist; 1388 * only because we have already cleared out the callbacks.
1271 rdp->qlen = 0; 1389 * (Otherwise, the RCU core might try to schedule the invocation
1390 * of callbacks on this now-offline CPU, which would be bad.)
1391 */
1392 mask = rdp->grpmask; /* rnp->grplo is constant. */
1393 trace_rcu_grace_period(rsp->name,
1394 rnp->gpnum + 1 - !!(rnp->qsmask & mask),
1395 "cpuofl");
1396 rcu_report_qs_rdp(smp_processor_id(), rsp, rdp, rsp->gpnum);
1397 /* Note that rcu_report_qs_rdp() might call trace_rcu_grace_period(). */
1272} 1398}
1273 1399
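For readers new to the segmented callback list, here is a minimal stand-alone model of the splice performed above. The names cb, cblist, and cblist_adopt() are invented for illustration; the kernel's real representation is the nxtlist pointer plus the nxttail[] array of tail pointers in struct rcu_data.

	#include <stddef.h>

	struct cb { struct cb *next; };

	enum { DONE, WAIT, NEXT_READY, NEXT, NSEG };	/* list segments */

	struct cblist {
		struct cb *head;
		struct cb **tail[NSEG];	/* tail[i] points at the ->next slot */
					/* ending segment i (or at head when */
					/* the list is empty).               */
	};

	static void cblist_init(struct cblist *l)
	{
		int i;

		l->head = NULL;
		for (i = 0; i < NSEG; i++)
			l->tail[i] = &l->head;
	}

	/* The "put the rest at the end" step: splice src onto dst's NEXT. */
	static void cblist_adopt(struct cblist *dst, struct cblist *src)
	{
		if (src->head == NULL)
			return;
		*dst->tail[NEXT] = src->head;	   /* link src's chain in   */
		dst->tail[NEXT] = src->tail[NEXT]; /* adopt src's last tail */
		cblist_init(src);		   /* leave src empty       */
	}
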
1274/* 1400/*
1275 * Remove the outgoing CPU from the bitmasks in the rcu_node hierarchy 1401 * The CPU has been completely removed, and some other CPU is reporting
1276 * and move all callbacks from the outgoing CPU to the current one. 1402 * this fact from process context. Do the remainder of the cleanup.
1277 * There can only be one CPU hotplug operation at a time, so no other 1403 * There can only be one CPU hotplug operation at a time, so no other
1278 * CPU can be attempting to update rcu_cpu_kthread_task. 1404 * CPU can be attempting to update rcu_cpu_kthread_task.
1279 */ 1405 */
1280static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp) 1406static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
1281{ 1407{
1282 unsigned long flags; 1408 unsigned long flags;
1283 unsigned long mask; 1409 unsigned long mask;
1284 int need_report = 0; 1410 int need_report = 0;
1285 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); 1411 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
1286 struct rcu_node *rnp; 1412 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rnp. */
1287 1413
1414 /* Adjust any no-longer-needed kthreads. */
1288 rcu_stop_cpu_kthread(cpu); 1415 rcu_stop_cpu_kthread(cpu);
1416 rcu_node_kthread_setaffinity(rnp, -1);
1417
1418 /* Remove the dying CPU from the bitmasks in the rcu_node hierarchy. */
1289 1419
1290 /* Exclude any attempts to start a new grace period. */ 1420 /* Exclude any attempts to start a new grace period. */
1291 raw_spin_lock_irqsave(&rsp->onofflock, flags); 1421 raw_spin_lock_irqsave(&rsp->onofflock, flags);
1292 1422
1293 /* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */ 1423 /* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */
1294 rnp = rdp->mynode; /* this is the outgoing CPU's rnp. */
1295 mask = rdp->grpmask; /* rnp->grplo is constant. */ 1424 mask = rdp->grpmask; /* rnp->grplo is constant. */
1296 do { 1425 do {
1297 raw_spin_lock(&rnp->lock); /* irqs already disabled. */ 1426 raw_spin_lock(&rnp->lock); /* irqs already disabled. */
@@ -1299,20 +1428,11 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
1299 if (rnp->qsmaskinit != 0) { 1428 if (rnp->qsmaskinit != 0) {
1300 if (rnp != rdp->mynode) 1429 if (rnp != rdp->mynode)
1301 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ 1430 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
1302 else
1303 trace_rcu_grace_period(rsp->name,
1304 rnp->gpnum + 1 -
1305 !!(rnp->qsmask & mask),
1306 "cpuofl");
1307 break; 1431 break;
1308 } 1432 }
1309 if (rnp == rdp->mynode) { 1433 if (rnp == rdp->mynode)
1310 trace_rcu_grace_period(rsp->name,
1311 rnp->gpnum + 1 -
1312 !!(rnp->qsmask & mask),
1313 "cpuofl");
1314 need_report = rcu_preempt_offline_tasks(rsp, rnp, rdp); 1434 need_report = rcu_preempt_offline_tasks(rsp, rnp, rdp);
1315 } else 1435 else
1316 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ 1436 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
1317 mask = rnp->grpmask; 1437 mask = rnp->grpmask;
1318 rnp = rnp->parent; 1438 rnp = rnp->parent;
@@ -1332,29 +1452,15 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
1332 raw_spin_unlock_irqrestore(&rnp->lock, flags); 1452 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1333 if (need_report & RCU_OFL_TASKS_EXP_GP) 1453 if (need_report & RCU_OFL_TASKS_EXP_GP)
1334 rcu_report_exp_rnp(rsp, rnp, true); 1454 rcu_report_exp_rnp(rsp, rnp, true);
1335 rcu_node_kthread_setaffinity(rnp, -1);
1336}
1337
1338/*
1339 * Remove the specified CPU from the RCU hierarchy and move any pending
1340 * callbacks that it might have to the current CPU. This code assumes
1341 * that at least one CPU in the system will remain running at all times.
1342 * Any attempt to offline -all- CPUs is likely to strand RCU callbacks.
1343 */
1344static void rcu_offline_cpu(int cpu)
1345{
1346 __rcu_offline_cpu(cpu, &rcu_sched_state);
1347 __rcu_offline_cpu(cpu, &rcu_bh_state);
1348 rcu_preempt_offline_cpu(cpu);
1349} 1455}
1350 1456
1351#else /* #ifdef CONFIG_HOTPLUG_CPU */ 1457#else /* #ifdef CONFIG_HOTPLUG_CPU */
1352 1458
1353static void rcu_send_cbs_to_online(struct rcu_state *rsp) 1459static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
1354{ 1460{
1355} 1461}
1356 1462
1357static void rcu_offline_cpu(int cpu) 1463static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
1358{ 1464{
1359} 1465}
1360 1466
@@ -1368,11 +1474,11 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
1368{ 1474{
1369 unsigned long flags; 1475 unsigned long flags;
1370 struct rcu_head *next, *list, **tail; 1476 struct rcu_head *next, *list, **tail;
1371 int bl, count; 1477 int bl, count, count_lazy;
1372 1478
1373 /* If no callbacks are ready, just return. */ 1479 /* If no callbacks are ready, just return. */
1374 if (!cpu_has_callbacks_ready_to_invoke(rdp)) { 1480 if (!cpu_has_callbacks_ready_to_invoke(rdp)) {
1375 trace_rcu_batch_start(rsp->name, 0, 0); 1481 trace_rcu_batch_start(rsp->name, rdp->qlen_lazy, rdp->qlen, 0);
1376 trace_rcu_batch_end(rsp->name, 0, !!ACCESS_ONCE(rdp->nxtlist), 1482 trace_rcu_batch_end(rsp->name, 0, !!ACCESS_ONCE(rdp->nxtlist),
1377 need_resched(), is_idle_task(current), 1483 need_resched(), is_idle_task(current),
1378 rcu_is_callbacks_kthread()); 1484 rcu_is_callbacks_kthread());
@@ -1384,8 +1490,9 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
1384 * races with call_rcu() from interrupt handlers. 1490 * races with call_rcu() from interrupt handlers.
1385 */ 1491 */
1386 local_irq_save(flags); 1492 local_irq_save(flags);
1493 WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
1387 bl = rdp->blimit; 1494 bl = rdp->blimit;
1388 trace_rcu_batch_start(rsp->name, rdp->qlen, bl); 1495 trace_rcu_batch_start(rsp->name, rdp->qlen_lazy, rdp->qlen, bl);
1389 list = rdp->nxtlist; 1496 list = rdp->nxtlist;
1390 rdp->nxtlist = *rdp->nxttail[RCU_DONE_TAIL]; 1497 rdp->nxtlist = *rdp->nxttail[RCU_DONE_TAIL];
1391 *rdp->nxttail[RCU_DONE_TAIL] = NULL; 1498 *rdp->nxttail[RCU_DONE_TAIL] = NULL;
@@ -1396,12 +1503,13 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
1396 local_irq_restore(flags); 1503 local_irq_restore(flags);
1397 1504
1398 /* Invoke callbacks. */ 1505 /* Invoke callbacks. */
1399 count = 0; 1506 count = count_lazy = 0;
1400 while (list) { 1507 while (list) {
1401 next = list->next; 1508 next = list->next;
1402 prefetch(next); 1509 prefetch(next);
1403 debug_rcu_head_unqueue(list); 1510 debug_rcu_head_unqueue(list);
1404 __rcu_reclaim(rsp->name, list); 1511 if (__rcu_reclaim(rsp->name, list))
1512 count_lazy++;
1405 list = next; 1513 list = next;
1406 /* Stop only if limit reached and CPU has something to do. */ 1514 /* Stop only if limit reached and CPU has something to do. */
1407 if (++count >= bl && 1515 if (++count >= bl &&
@@ -1416,6 +1524,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
1416 rcu_is_callbacks_kthread()); 1524 rcu_is_callbacks_kthread());
1417 1525
1418 /* Update count, and requeue any remaining callbacks. */ 1526 /* Update count, and requeue any remaining callbacks. */
1527 rdp->qlen_lazy -= count_lazy;
1419 rdp->qlen -= count; 1528 rdp->qlen -= count;
1420 rdp->n_cbs_invoked += count; 1529 rdp->n_cbs_invoked += count;
1421 if (list != NULL) { 1530 if (list != NULL) {
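The count_lazy bookkeeping above depends on __rcu_reclaim() (in kernel/rcu.h) now returning nonzero when the callback it handled was a kfree_rcu()-style lazy one. A hedged sketch of that contract, modeled on the kernel's offset encoding (the 4096 cutoff mirrors __is_kfree_rcu_offset()):

	/*
	 * Sketch only: "func" values below 4096 are not function pointers
	 * but byte offsets of the rcu_head within its enclosing object,
	 * planted by kfree_rcu().  Such callbacks only free memory, which
	 * is what makes them "lazy".
	 */
	static int rcu_reclaim_sketch(struct rcu_head *head)
	{
		unsigned long offset = (unsigned long)head->func;

		if (offset < 4096) {			/* kfree offset?  */
			kfree((char *)head - offset);	/* free object    */
			return 1;			/* lazy callback  */
		}
		head->func(head);			/* real callback  */
		return 0;				/* non-lazy       */
	}
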
@@ -1458,6 +1567,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
1458void rcu_check_callbacks(int cpu, int user) 1567void rcu_check_callbacks(int cpu, int user)
1459{ 1568{
1460 trace_rcu_utilization("Start scheduler-tick"); 1569 trace_rcu_utilization("Start scheduler-tick");
1570 increment_cpu_stall_ticks();
1461 if (user || rcu_is_cpu_rrupt_from_idle()) { 1571 if (user || rcu_is_cpu_rrupt_from_idle()) {
1462 1572
1463 /* 1573 /*
@@ -1492,8 +1602,6 @@ void rcu_check_callbacks(int cpu, int user)
1492 trace_rcu_utilization("End scheduler-tick"); 1602 trace_rcu_utilization("End scheduler-tick");
1493} 1603}
1494 1604
1495#ifdef CONFIG_SMP
1496
1497/* 1605/*
1498 * Scan the leaf rcu_node structures, processing dyntick state for any that 1606 * Scan the leaf rcu_node structures, processing dyntick state for any that
1499 * have not yet encountered a quiescent state, using the function specified. 1607 * have not yet encountered a quiescent state, using the function specified.
@@ -1616,15 +1724,6 @@ unlock_fqs_ret:
1616 trace_rcu_utilization("End fqs"); 1724 trace_rcu_utilization("End fqs");
1617} 1725}
1618 1726
1619#else /* #ifdef CONFIG_SMP */
1620
1621static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
1622{
1623 set_need_resched();
1624}
1625
1626#endif /* #else #ifdef CONFIG_SMP */
1627
1628/* 1727/*
1629 * This does the RCU core processing work for the specified rcu_state 1728 * This does the RCU core processing work for the specified rcu_state
1630 * and rcu_data structures. This may be called only from the CPU to 1729 * and rcu_data structures. This may be called only from the CPU to
@@ -1702,11 +1801,12 @@ static void invoke_rcu_core(void)
1702 1801
1703static void 1802static void
1704__call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu), 1803__call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
1705 struct rcu_state *rsp) 1804 struct rcu_state *rsp, bool lazy)
1706{ 1805{
1707 unsigned long flags; 1806 unsigned long flags;
1708 struct rcu_data *rdp; 1807 struct rcu_data *rdp;
1709 1808
1809 WARN_ON_ONCE((unsigned long)head & 0x3); /* Misaligned rcu_head! */
1710 debug_rcu_head_queue(head); 1810 debug_rcu_head_queue(head);
1711 head->func = func; 1811 head->func = func;
1712 head->next = NULL; 1812 head->next = NULL;
@@ -1720,18 +1820,21 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
1720 * a quiescent state betweentimes. 1820 * a quiescent state betweentimes.
1721 */ 1821 */
1722 local_irq_save(flags); 1822 local_irq_save(flags);
1823 WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
1723 rdp = this_cpu_ptr(rsp->rda); 1824 rdp = this_cpu_ptr(rsp->rda);
1724 1825
1725 /* Add the callback to our list. */ 1826 /* Add the callback to our list. */
1726 *rdp->nxttail[RCU_NEXT_TAIL] = head; 1827 *rdp->nxttail[RCU_NEXT_TAIL] = head;
1727 rdp->nxttail[RCU_NEXT_TAIL] = &head->next; 1828 rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
1728 rdp->qlen++; 1829 rdp->qlen++;
1830 if (lazy)
1831 rdp->qlen_lazy++;
1729 1832
1730 if (__is_kfree_rcu_offset((unsigned long)func)) 1833 if (__is_kfree_rcu_offset((unsigned long)func))
1731 trace_rcu_kfree_callback(rsp->name, head, (unsigned long)func, 1834 trace_rcu_kfree_callback(rsp->name, head, (unsigned long)func,
1732 rdp->qlen); 1835 rdp->qlen_lazy, rdp->qlen);
1733 else 1836 else
1734 trace_rcu_callback(rsp->name, head, rdp->qlen); 1837 trace_rcu_callback(rsp->name, head, rdp->qlen_lazy, rdp->qlen);
1735 1838
1736 /* If interrupts were disabled, don't dive into RCU core. */ 1839 /* If interrupts were disabled, don't dive into RCU core. */
1737 if (irqs_disabled_flags(flags)) { 1840 if (irqs_disabled_flags(flags)) {
@@ -1778,16 +1881,16 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
1778 */ 1881 */
1779void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) 1882void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
1780{ 1883{
1781 __call_rcu(head, func, &rcu_sched_state); 1884 __call_rcu(head, func, &rcu_sched_state, 0);
1782} 1885}
1783EXPORT_SYMBOL_GPL(call_rcu_sched); 1886EXPORT_SYMBOL_GPL(call_rcu_sched);
1784 1887
1785/* 1888/*
1786 * Queue an RCU for invocation after a quicker grace period. 1889 * Queue an RCU callback for invocation after a quicker grace period.
1787 */ 1890 */
1788void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) 1891void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
1789{ 1892{
1790 __call_rcu(head, func, &rcu_bh_state); 1893 __call_rcu(head, func, &rcu_bh_state, 0);
1791} 1894}
1792EXPORT_SYMBOL_GPL(call_rcu_bh); 1895EXPORT_SYMBOL_GPL(call_rcu_bh);
1793 1896
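Note that all three public entry points (call_rcu_sched() and call_rcu_bh() above, call_rcu() later in this patch) pass lazy=0; only kfree_call_rcu() passes lazy=1, so qlen_lazy counts exactly the kfree_rcu() backlog. Condensing the enqueue hunk above into a sketch, with locking, tracing, and the force-quiescent-state heuristics elided:

	static void __call_rcu_sketch(struct rcu_head *head,
				      void (*func)(struct rcu_head *),
				      struct rcu_state *rsp, bool lazy)
	{
		struct rcu_data *rdp = this_cpu_ptr(rsp->rda);

		head->func = func;
		head->next = NULL;
		*rdp->nxttail[RCU_NEXT_TAIL] = head;	/* append to NEXT   */
		rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
		rdp->qlen++;				/* all callbacks    */
		if (lazy)
			rdp->qlen_lazy++;		/* kfree_rcu() only */
	}
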
@@ -1816,6 +1919,10 @@ EXPORT_SYMBOL_GPL(call_rcu_bh);
1816 */ 1919 */
1817void synchronize_sched(void) 1920void synchronize_sched(void)
1818{ 1921{
1922 rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
1923 !lock_is_held(&rcu_lock_map) &&
1924 !lock_is_held(&rcu_sched_lock_map),
1925 "Illegal synchronize_sched() in RCU-sched read-side critical section");
1819 if (rcu_blocking_is_gp()) 1926 if (rcu_blocking_is_gp())
1820 return; 1927 return;
1821 wait_rcu_gp(call_rcu_sched); 1928 wait_rcu_gp(call_rcu_sched);
@@ -1833,12 +1940,137 @@ EXPORT_SYMBOL_GPL(synchronize_sched);
1833 */ 1940 */
1834void synchronize_rcu_bh(void) 1941void synchronize_rcu_bh(void)
1835{ 1942{
1943 rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
1944 !lock_is_held(&rcu_lock_map) &&
1945 !lock_is_held(&rcu_sched_lock_map),
1946 "Illegal synchronize_rcu_bh() in RCU-bh read-side critical section");
1836 if (rcu_blocking_is_gp()) 1947 if (rcu_blocking_is_gp())
1837 return; 1948 return;
1838 wait_rcu_gp(call_rcu_bh); 1949 wait_rcu_gp(call_rcu_bh);
1839} 1950}
1840EXPORT_SYMBOL_GPL(synchronize_rcu_bh); 1951EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
1841 1952
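The rcu_lockdep_assert() calls added to both functions turn a class of silent self-deadlocks into immediate lockdep splats on PROVE_RCU-enabled kernels. A hedged example of the bug they catch:

	rcu_read_lock();
	/*
	 * BUG: a grace period must wait for this very reader, so the
	 * wait below can never complete.  With the new assertion,
	 * lockdep reports "Illegal synchronize_rcu() in RCU read-side
	 * critical section" instead of the system quietly hanging.
	 */
	synchronize_rcu();
	rcu_read_unlock();
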
1953static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
1954static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
1955
1956static int synchronize_sched_expedited_cpu_stop(void *data)
1957{
1958 /*
1959 * There must be a full memory barrier on each affected CPU
1960 * between the time that try_stop_cpus() is called and the
1961 * time that it returns.
1962 *
1963 * In the current initial implementation of cpu_stop, the
1964 * above condition is already met when the control reaches
1965 * this point and the following smp_mb() is not strictly
1966 * necessary. Do smp_mb() anyway for documentation and
1967 * robustness against future implementation changes.
1968 */
1969 smp_mb(); /* See above comment block. */
1970 return 0;
1971}
1972
1973/**
1974 * synchronize_sched_expedited - Brute-force RCU-sched grace period
1975 *
1976 * Wait for an RCU-sched grace period to elapse, but use a "big hammer"
1977 * approach to force the grace period to end quickly. This consumes
1978 * significant time on all CPUs and is unfriendly to real-time workloads,
1979 * so it is not recommended for any sort of common-case code. In fact,
1980 * if you are using synchronize_sched_expedited() in a loop, please
1981 * restructure your code to batch your updates, and then use a single
1982 * synchronize_sched() instead.
1983 *
1984 * Note that it is illegal to call this function while holding any lock
1985 * that is acquired by a CPU-hotplug notifier. And yes, it is also illegal
1986 * to call this function from a CPU-hotplug notifier. Failing to observe
1987 * these restrictions will result in deadlock.
1988 *
1989 * This implementation can be thought of as an application of ticket
1990 * locking to RCU, with sync_sched_expedited_started and
1991 * sync_sched_expedited_done taking on the roles of the halves
1992 * of the ticket-lock word. Each task atomically increments
1993 * sync_sched_expedited_started upon entry, snapshotting the old value,
1994 * then attempts to stop all the CPUs. If this succeeds, then each
1995 * CPU will have executed a context switch, resulting in an RCU-sched
1996 * grace period. We are then done, so we use atomic_cmpxchg() to
1997 * update sync_sched_expedited_done to match our snapshot -- but
1998 * only if someone else has not already advanced past our snapshot.
1999 *
2000 * On the other hand, if try_stop_cpus() fails, we check the value
2001 * of sync_sched_expedited_done. If it has advanced past our
2002 * initial snapshot, then someone else must have forced a grace period
2003 * some time after we took our snapshot. In this case, our work is
2004 * done for us, and we can simply return. Otherwise, we try again,
2005 * but keep our initial snapshot for purposes of checking for someone
2006 * doing our work for us.
2007 *
2008 * If we fail too many times in a row, we fall back to synchronize_sched().
2009 */
2010void synchronize_sched_expedited(void)
2011{
2012 int firstsnap, s, snap, trycount = 0;
2013
2014 /* Note that atomic_inc_return() implies full memory barrier. */
2015 firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
2016 get_online_cpus();
2017 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
2018
2019 /*
2020 * Each pass through the following loop attempts to force a
2021 * context switch on each CPU.
2022 */
2023 while (try_stop_cpus(cpu_online_mask,
2024 synchronize_sched_expedited_cpu_stop,
2025 NULL) == -EAGAIN) {
2026 put_online_cpus();
2027
2028 /* No joy, try again later. Or just synchronize_sched(). */
2029 if (trycount++ < 10)
2030 udelay(trycount * num_online_cpus());
2031 else {
2032 synchronize_sched();
2033 return;
2034 }
2035
2036 /* Check to see if someone else did our work for us. */
2037 s = atomic_read(&sync_sched_expedited_done);
2038 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
2039 smp_mb(); /* ensure test happens before caller kfree */
2040 return;
2041 }
2042
2043 /*
2044 * Refetching sync_sched_expedited_started allows later
2045 * callers to piggyback on our grace period. We subtract
2046 * 1 to get the same token that the last incrementer got.
2047 * We retry after they started, so our grace period works
2048 * for them, and they started after our first try, so their
2049 * grace period works for us.
2050 */
2051 get_online_cpus();
2052 snap = atomic_read(&sync_sched_expedited_started);
2053 smp_mb(); /* ensure read is before try_stop_cpus(). */
2054 }
2055
2056 /*
2057 * Everyone up to our most recent fetch is covered by our grace
2058 * period. Update the counter, but only if our work is still
2059 * relevant -- which it won't be if someone who started later
2060 * than we did beat us to the punch.
2061 */
2062 do {
2063 s = atomic_read(&sync_sched_expedited_done);
2064 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
2065 smp_mb(); /* ensure test happens before caller kfree */
2066 break;
2067 }
2068 } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
2069
2070 put_online_cpus();
2071}
2072EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
2073
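To make the ticket analogy concrete, here is a stand-alone user-space model of just the started/done counter protocol, using C11 atomics. The helper try_force_context_switches() is an invented stand-in for try_stop_cpus(), and the model omits firstsnap and the fallback to synchronize_sched():

	#include <stdatomic.h>
	#include <stdbool.h>

	extern bool try_force_context_switches(void);	/* invented stand-in */

	static atomic_int started = ATOMIC_VAR_INIT(0);
	static atomic_int done = ATOMIC_VAR_INIT(0);

	/* Wraparound-safe "done >= snap", like the kernel's UINT_CMP_GE(). */
	static bool done_geq(int snap)
	{
		return (unsigned)atomic_load(&done) - (unsigned)snap
		       < 0x80000000u;
	}

	void expedited_model(void)
	{
		int snap = atomic_fetch_add(&started, 1) + 1; /* take a ticket */
		int s;

		while (!try_force_context_switches()) {
			if (done_geq(snap))
				return;	/* a concurrent caller covered us */
			snap = atomic_load(&started); /* let later callers */
						      /* piggyback on us   */
		}
		/* Our switches ended a grace period; publish unless stale. */
		do {
			s = atomic_load(&done);
			if (done_geq(snap))
				break;	/* someone advanced past our ticket */
		} while (!atomic_compare_exchange_strong(&done, &s, snap));
	}
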
1842/* 2074/*
1843 * Check to see if there is any immediate RCU-related work to be done 2075 * Check to see if there is any immediate RCU-related work to be done
1844 * by the current CPU, for the specified type of RCU, returning 1 if so. 2076 * by the current CPU, for the specified type of RCU, returning 1 if so.
@@ -1932,7 +2164,7 @@ static int rcu_cpu_has_callbacks(int cpu)
1932 /* RCU callbacks either ready or pending? */ 2164 /* RCU callbacks either ready or pending? */
1933 return per_cpu(rcu_sched_data, cpu).nxtlist || 2165 return per_cpu(rcu_sched_data, cpu).nxtlist ||
1934 per_cpu(rcu_bh_data, cpu).nxtlist || 2166 per_cpu(rcu_bh_data, cpu).nxtlist ||
1935 rcu_preempt_needs_cpu(cpu); 2167 rcu_preempt_cpu_has_callbacks(cpu);
1936} 2168}
1937 2169
1938static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL}; 2170static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
@@ -2027,9 +2259,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
2027 rdp->nxtlist = NULL; 2259 rdp->nxtlist = NULL;
2028 for (i = 0; i < RCU_NEXT_SIZE; i++) 2260 for (i = 0; i < RCU_NEXT_SIZE; i++)
2029 rdp->nxttail[i] = &rdp->nxtlist; 2261 rdp->nxttail[i] = &rdp->nxtlist;
2262 rdp->qlen_lazy = 0;
2030 rdp->qlen = 0; 2263 rdp->qlen = 0;
2031 rdp->dynticks = &per_cpu(rcu_dynticks, cpu); 2264 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
2032 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_NESTING); 2265 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
2033 WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1); 2266 WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
2034 rdp->cpu = cpu; 2267 rdp->cpu = cpu;
2035 rdp->rsp = rsp; 2268 rdp->rsp = rsp;
@@ -2057,7 +2290,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
2057 rdp->qlen_last_fqs_check = 0; 2290 rdp->qlen_last_fqs_check = 0;
2058 rdp->n_force_qs_snap = rsp->n_force_qs; 2291 rdp->n_force_qs_snap = rsp->n_force_qs;
2059 rdp->blimit = blimit; 2292 rdp->blimit = blimit;
2060 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_NESTING; 2293 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
2061 atomic_set(&rdp->dynticks->dynticks, 2294 atomic_set(&rdp->dynticks->dynticks,
2062 (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1); 2295 (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
2063 rcu_prepare_for_idle_init(cpu); 2296 rcu_prepare_for_idle_init(cpu);
@@ -2139,16 +2372,18 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
2139 * touch any data without introducing corruption. We send the 2372 * touch any data without introducing corruption. We send the
2140 * dying CPU's callbacks to an arbitrarily chosen online CPU. 2373 * dying CPU's callbacks to an arbitrarily chosen online CPU.
2141 */ 2374 */
2142 rcu_send_cbs_to_online(&rcu_bh_state); 2375 rcu_cleanup_dying_cpu(&rcu_bh_state);
2143 rcu_send_cbs_to_online(&rcu_sched_state); 2376 rcu_cleanup_dying_cpu(&rcu_sched_state);
2144 rcu_preempt_send_cbs_to_online(); 2377 rcu_preempt_cleanup_dying_cpu();
2145 rcu_cleanup_after_idle(cpu); 2378 rcu_cleanup_after_idle(cpu);
2146 break; 2379 break;
2147 case CPU_DEAD: 2380 case CPU_DEAD:
2148 case CPU_DEAD_FROZEN: 2381 case CPU_DEAD_FROZEN:
2149 case CPU_UP_CANCELED: 2382 case CPU_UP_CANCELED:
2150 case CPU_UP_CANCELED_FROZEN: 2383 case CPU_UP_CANCELED_FROZEN:
2151 rcu_offline_cpu(cpu); 2384 rcu_cleanup_dead_cpu(cpu, &rcu_bh_state);
2385 rcu_cleanup_dead_cpu(cpu, &rcu_sched_state);
2386 rcu_preempt_cleanup_dead_cpu(cpu);
2152 break; 2387 break;
2153 default: 2388 default:
2154 break; 2389 break;
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index fddff92d6676..cdd1be0a4072 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -239,6 +239,12 @@ struct rcu_data {
239 bool preemptible; /* Preemptible RCU? */ 239 bool preemptible; /* Preemptible RCU? */
240 struct rcu_node *mynode; /* This CPU's leaf of hierarchy */ 240 struct rcu_node *mynode; /* This CPU's leaf of hierarchy */
241 unsigned long grpmask; /* Mask to apply to leaf qsmask. */ 241 unsigned long grpmask; /* Mask to apply to leaf qsmask. */
242#ifdef CONFIG_RCU_CPU_STALL_INFO
243 unsigned long ticks_this_gp; /* The number of scheduling-clock */
244 /* ticks this CPU has handled */
245 /* during and after the last grace */
246 /* period it is aware of. */
247#endif /* #ifdef CONFIG_RCU_CPU_STALL_INFO */
242 248
243 /* 2) batch handling */ 249 /* 2) batch handling */
244 /* 250 /*
@@ -265,7 +271,8 @@ struct rcu_data {
265 */ 271 */
266 struct rcu_head *nxtlist; 272 struct rcu_head *nxtlist;
267 struct rcu_head **nxttail[RCU_NEXT_SIZE]; 273 struct rcu_head **nxttail[RCU_NEXT_SIZE];
268 long qlen; /* # of queued callbacks */ 274 long qlen_lazy; /* # of lazy queued callbacks */
275 long qlen; /* # of queued callbacks, incl lazy */
269 long qlen_last_fqs_check; 276 long qlen_last_fqs_check;
270 /* qlen at last check for QS forcing */ 277 /* qlen at last check for QS forcing */
271 unsigned long n_cbs_invoked; /* count of RCU cbs invoked. */ 278 unsigned long n_cbs_invoked; /* count of RCU cbs invoked. */
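The relationship between the two counters is worth stating: qlen_lazy is a subset count, so the number of callbacks whose grace period gates externally visible work is qlen - qlen_lazy, which is exactly the test the non-lazy predicates later in this patch perform. A tiny illustrative sanity check (not part of the patch):

	static void check_qlen_invariant(struct rcu_data *rdp)
	{
		WARN_ON_ONCE(rdp->qlen_lazy < 0);
		WARN_ON_ONCE(rdp->qlen_lazy > rdp->qlen);
	}
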
@@ -282,7 +289,6 @@ struct rcu_data {
282 /* 4) reasons this CPU needed to be kicked by force_quiescent_state */ 289 /* 4) reasons this CPU needed to be kicked by force_quiescent_state */
283 unsigned long dynticks_fqs; /* Kicked due to dynticks idle. */ 290 unsigned long dynticks_fqs; /* Kicked due to dynticks idle. */
284 unsigned long offline_fqs; /* Kicked due to being offline. */ 291 unsigned long offline_fqs; /* Kicked due to being offline. */
285 unsigned long resched_ipi; /* Sent a resched IPI. */
286 292
287 /* 5) __rcu_pending() statistics. */ 293 /* 5) __rcu_pending() statistics. */
288 unsigned long n_rcu_pending; /* rcu_pending() calls since boot. */ 294 unsigned long n_rcu_pending; /* rcu_pending() calls since boot. */
@@ -313,12 +319,6 @@ struct rcu_data {
313#else 319#else
314#define RCU_STALL_DELAY_DELTA 0 320#define RCU_STALL_DELAY_DELTA 0
315#endif 321#endif
316
317#define RCU_SECONDS_TILL_STALL_CHECK (CONFIG_RCU_CPU_STALL_TIMEOUT * HZ + \
318 RCU_STALL_DELAY_DELTA)
319 /* for rsp->jiffies_stall */
320#define RCU_SECONDS_TILL_STALL_RECHECK (3 * RCU_SECONDS_TILL_STALL_CHECK + 30)
321 /* for rsp->jiffies_stall */
322#define RCU_STALL_RAT_DELAY 2 /* Allow other CPUs time */ 322#define RCU_STALL_RAT_DELAY 2 /* Allow other CPUs time */
323 /* to take at least one */ 323 /* to take at least one */
324 /* scheduling clock irq */ 324 /* scheduling clock irq */
@@ -438,8 +438,8 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
438static int rcu_preempt_offline_tasks(struct rcu_state *rsp, 438static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
439 struct rcu_node *rnp, 439 struct rcu_node *rnp,
440 struct rcu_data *rdp); 440 struct rcu_data *rdp);
441static void rcu_preempt_offline_cpu(int cpu);
442#endif /* #ifdef CONFIG_HOTPLUG_CPU */ 441#endif /* #ifdef CONFIG_HOTPLUG_CPU */
442static void rcu_preempt_cleanup_dead_cpu(int cpu);
443static void rcu_preempt_check_callbacks(int cpu); 443static void rcu_preempt_check_callbacks(int cpu);
444static void rcu_preempt_process_callbacks(void); 444static void rcu_preempt_process_callbacks(void);
445void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)); 445void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
@@ -448,9 +448,9 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
448 bool wake); 448 bool wake);
449#endif /* #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU) */ 449#endif /* #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU) */
450static int rcu_preempt_pending(int cpu); 450static int rcu_preempt_pending(int cpu);
451static int rcu_preempt_needs_cpu(int cpu); 451static int rcu_preempt_cpu_has_callbacks(int cpu);
452static void __cpuinit rcu_preempt_init_percpu_data(int cpu); 452static void __cpuinit rcu_preempt_init_percpu_data(int cpu);
453static void rcu_preempt_send_cbs_to_online(void); 453static void rcu_preempt_cleanup_dying_cpu(void);
454static void __init __rcu_init_preempt(void); 454static void __init __rcu_init_preempt(void);
455static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags); 455static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
456static void rcu_preempt_boost_start_gp(struct rcu_node *rnp); 456static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
@@ -471,5 +471,10 @@ static void __cpuinit rcu_prepare_kthreads(int cpu);
471static void rcu_prepare_for_idle_init(int cpu); 471static void rcu_prepare_for_idle_init(int cpu);
472static void rcu_cleanup_after_idle(int cpu); 472static void rcu_cleanup_after_idle(int cpu);
473static void rcu_prepare_for_idle(int cpu); 473static void rcu_prepare_for_idle(int cpu);
474static void print_cpu_stall_info_begin(void);
475static void print_cpu_stall_info(struct rcu_state *rsp, int cpu);
476static void print_cpu_stall_info_end(void);
477static void zero_cpu_stall_ticks(struct rcu_data *rdp);
478static void increment_cpu_stall_ticks(void);
474 479
475#endif /* #ifndef RCU_TREE_NONCORE */ 480#endif /* #ifndef RCU_TREE_NONCORE */
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 8bb35d73e1f9..c023464816be 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -25,7 +25,6 @@
25 */ 25 */
26 26
27#include <linux/delay.h> 27#include <linux/delay.h>
28#include <linux/stop_machine.h>
29 28
30#define RCU_KTHREAD_PRIO 1 29#define RCU_KTHREAD_PRIO 1
31 30
@@ -63,7 +62,10 @@ static void __init rcu_bootup_announce_oddness(void)
63 printk(KERN_INFO "\tRCU torture testing starts during boot.\n"); 62 printk(KERN_INFO "\tRCU torture testing starts during boot.\n");
64#endif 63#endif
65#if defined(CONFIG_TREE_PREEMPT_RCU) && !defined(CONFIG_RCU_CPU_STALL_VERBOSE) 64#if defined(CONFIG_TREE_PREEMPT_RCU) && !defined(CONFIG_RCU_CPU_STALL_VERBOSE)
66 printk(KERN_INFO "\tVerbose stalled-CPUs detection is disabled.\n"); 65 printk(KERN_INFO "\tDump stacks of tasks blocking RCU-preempt GP.\n");
66#endif
67#if defined(CONFIG_RCU_CPU_STALL_INFO)
68 printk(KERN_INFO "\tAdditional per-CPU info printed with stalls.\n");
67#endif 69#endif
68#if NUM_RCU_LVL_4 != 0 70#if NUM_RCU_LVL_4 != 0
69 printk(KERN_INFO "\tExperimental four-level hierarchy is enabled.\n"); 71 printk(KERN_INFO "\tExperimental four-level hierarchy is enabled.\n");
@@ -490,6 +492,31 @@ static void rcu_print_detail_task_stall(struct rcu_state *rsp)
490 492
491#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */ 493#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */
492 494
495#ifdef CONFIG_RCU_CPU_STALL_INFO
496
497static void rcu_print_task_stall_begin(struct rcu_node *rnp)
498{
499 printk(KERN_ERR "\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
500 rnp->level, rnp->grplo, rnp->grphi);
501}
502
503static void rcu_print_task_stall_end(void)
504{
505 printk(KERN_CONT "\n");
506}
507
508#else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */
509
510static void rcu_print_task_stall_begin(struct rcu_node *rnp)
511{
512}
513
514static void rcu_print_task_stall_end(void)
515{
516}
517
518#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */
519
493/* 520/*
494 * Scan the current list of tasks blocked within RCU read-side critical 521 * Scan the current list of tasks blocked within RCU read-side critical
495 * sections, printing out the tid of each. 522 * sections, printing out the tid of each.
@@ -501,12 +528,14 @@ static int rcu_print_task_stall(struct rcu_node *rnp)
501 528
502 if (!rcu_preempt_blocked_readers_cgp(rnp)) 529 if (!rcu_preempt_blocked_readers_cgp(rnp))
503 return 0; 530 return 0;
531 rcu_print_task_stall_begin(rnp);
504 t = list_entry(rnp->gp_tasks, 532 t = list_entry(rnp->gp_tasks,
505 struct task_struct, rcu_node_entry); 533 struct task_struct, rcu_node_entry);
506 list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) { 534 list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
507 printk(" P%d", t->pid); 535 printk(KERN_CONT " P%d", t->pid);
508 ndetected++; 536 ndetected++;
509 } 537 }
538 rcu_print_task_stall_end();
510 return ndetected; 539 return ndetected;
511} 540}
512 541
@@ -581,7 +610,7 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
581 * absolutely necessary, but this is a good performance/complexity 610 * absolutely necessary, but this is a good performance/complexity
582 * tradeoff. 611 * tradeoff.
583 */ 612 */
584 if (rcu_preempt_blocked_readers_cgp(rnp)) 613 if (rcu_preempt_blocked_readers_cgp(rnp) && rnp->qsmask == 0)
585 retval |= RCU_OFL_TASKS_NORM_GP; 614 retval |= RCU_OFL_TASKS_NORM_GP;
586 if (rcu_preempted_readers_exp(rnp)) 615 if (rcu_preempted_readers_exp(rnp))
587 retval |= RCU_OFL_TASKS_EXP_GP; 616 retval |= RCU_OFL_TASKS_EXP_GP;
@@ -618,16 +647,16 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
618 return retval; 647 return retval;
619} 648}
620 649
650#endif /* #ifdef CONFIG_HOTPLUG_CPU */
651
621/* 652/*
622 * Do CPU-offline processing for preemptible RCU. 653 * Do CPU-offline processing for preemptible RCU.
623 */ 654 */
624static void rcu_preempt_offline_cpu(int cpu) 655static void rcu_preempt_cleanup_dead_cpu(int cpu)
625{ 656{
626 __rcu_offline_cpu(cpu, &rcu_preempt_state); 657 rcu_cleanup_dead_cpu(cpu, &rcu_preempt_state);
627} 658}
628 659
629#endif /* #ifdef CONFIG_HOTPLUG_CPU */
630
631/* 660/*
632 * Check for a quiescent state from the current CPU. When a task blocks, 661 * Check for a quiescent state from the current CPU. When a task blocks,
633 * the task is recorded in the corresponding CPU's rcu_node structure, 662 * the task is recorded in the corresponding CPU's rcu_node structure,
@@ -671,10 +700,24 @@ static void rcu_preempt_do_callbacks(void)
671 */ 700 */
672void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) 701void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
673{ 702{
674 __call_rcu(head, func, &rcu_preempt_state); 703 __call_rcu(head, func, &rcu_preempt_state, 0);
675} 704}
676EXPORT_SYMBOL_GPL(call_rcu); 705EXPORT_SYMBOL_GPL(call_rcu);
677 706
707/*
708 * Queue an RCU callback for lazy invocation after a grace period.
709 * This will likely be later named something like "call_rcu_lazy()",
710 * but this change will require some way of tagging the lazy RCU
711 * callbacks in the list of pending callbacks. Until then, this
712 * function may only be called from __kfree_rcu().
713 */
714void kfree_call_rcu(struct rcu_head *head,
715 void (*func)(struct rcu_head *rcu))
716{
717 __call_rcu(head, func, &rcu_preempt_state, 1);
718}
719EXPORT_SYMBOL_GPL(kfree_call_rcu);
720
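A hedged usage sketch: kfree_rcu() (in rcupdate.h) is the only sanctioned caller, and it reaches kfree_call_rcu() with the rcu_head's offset smuggled through the function-pointer argument. The struct and helper names here are invented for illustration:

	struct foo {
		int data;
		struct rcu_head rcu;	/* must live inside the object */
	};

	static void defer_free(struct foo *p)
	{
		/*
		 * Roughly expands to
		 *   kfree_call_rcu(&p->rcu, (void (*)(struct rcu_head *))
		 *                           offsetof(struct foo, rcu));
		 * so the "function pointer" is really a small offset,
		 * letting the reclaim path kfree() the whole object with
		 * no real callback -- which is what makes it lazy.
		 */
		kfree_rcu(p, rcu);
	}
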
678/** 721/**
679 * synchronize_rcu - wait until a grace period has elapsed. 722 * synchronize_rcu - wait until a grace period has elapsed.
680 * 723 *
@@ -688,6 +731,10 @@ EXPORT_SYMBOL_GPL(call_rcu);
688 */ 731 */
689void synchronize_rcu(void) 732void synchronize_rcu(void)
690{ 733{
734 rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
735 !lock_is_held(&rcu_lock_map) &&
736 !lock_is_held(&rcu_sched_lock_map),
737 "Illegal synchronize_rcu() in RCU read-side critical section");
691 if (!rcu_scheduler_active) 738 if (!rcu_scheduler_active)
692 return; 739 return;
693 wait_rcu_gp(call_rcu); 740 wait_rcu_gp(call_rcu);
@@ -788,10 +835,22 @@ sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
788 rcu_report_exp_rnp(rsp, rnp, false); /* Don't wake self. */ 835 rcu_report_exp_rnp(rsp, rnp, false); /* Don't wake self. */
789} 836}
790 837
791/* 838/**
792 * Wait for an rcu-preempt grace period, but expedite it. The basic idea 839 * synchronize_rcu_expedited - Brute-force RCU grace period
793 * is to invoke synchronize_sched_expedited() to push all the tasks to 840 *
794 * the ->blkd_tasks lists and wait for this list to drain. 841 * Wait for an RCU-preempt grace period, but expedite it. The basic
842 * idea is to invoke synchronize_sched_expedited() to push all the tasks to
843 * the ->blkd_tasks lists and wait for this list to drain. This consumes
844 * significant time on all CPUs and is unfriendly to real-time workloads,
845 * so it is not recommended for any sort of common-case code.
846 * In fact, if you are using synchronize_rcu_expedited() in a loop,
847 * please restructure your code to batch your updates, and then use a
848 * single synchronize_rcu() instead.
849 *
850 * Note that it is illegal to call this function while holding any lock
851 * that is acquired by a CPU-hotplug notifier. And yes, it is also illegal
852 * to call this function from a CPU-hotplug notifier. Failing to observe
853 * these restrictions will result in deadlock.
795 */ 854 */
796void synchronize_rcu_expedited(void) 855void synchronize_rcu_expedited(void)
797{ 856{
@@ -869,9 +928,9 @@ static int rcu_preempt_pending(int cpu)
869} 928}
870 929
871/* 930/*
872 * Does preemptible RCU need the CPU to stay out of dynticks mode? 931 * Does preemptible RCU have callbacks on this CPU?
873 */ 932 */
874static int rcu_preempt_needs_cpu(int cpu) 933static int rcu_preempt_cpu_has_callbacks(int cpu)
875{ 934{
876 return !!per_cpu(rcu_preempt_data, cpu).nxtlist; 935 return !!per_cpu(rcu_preempt_data, cpu).nxtlist;
877} 936}
@@ -894,11 +953,12 @@ static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
894} 953}
895 954
896/* 955/*
897 * Move preemptible RCU's callbacks from dying CPU to other online CPU. 956 * Move preemptible RCU's callbacks from dying CPU to other online CPU
957 * and record a quiescent state.
898 */ 958 */
899static void rcu_preempt_send_cbs_to_online(void) 959static void rcu_preempt_cleanup_dying_cpu(void)
900{ 960{
901 rcu_send_cbs_to_online(&rcu_preempt_state); 961 rcu_cleanup_dying_cpu(&rcu_preempt_state);
902} 962}
903 963
904/* 964/*
@@ -1034,16 +1094,16 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
1034 return 0; 1094 return 0;
1035} 1095}
1036 1096
1097#endif /* #ifdef CONFIG_HOTPLUG_CPU */
1098
1037/* 1099/*
1038 * Because preemptible RCU does not exist, it never needs CPU-offline 1100 * Because preemptible RCU does not exist, it never needs CPU-offline
1039 * processing. 1101 * processing.
1040 */ 1102 */
1041static void rcu_preempt_offline_cpu(int cpu) 1103static void rcu_preempt_cleanup_dead_cpu(int cpu)
1042{ 1104{
1043} 1105}
1044 1106
1045#endif /* #ifdef CONFIG_HOTPLUG_CPU */
1046
1047/* 1107/*
1048 * Because preemptible RCU does not exist, it never has any callbacks 1108 * Because preemptible RCU does not exist, it never has any callbacks
1049 * to check. 1109 * to check.
@@ -1061,6 +1121,22 @@ static void rcu_preempt_process_callbacks(void)
1061} 1121}
1062 1122
1063/* 1123/*
1124 * Queue an RCU callback for lazy invocation after a grace period.
1125 * This will likely be later named something like "call_rcu_lazy()",
1126 * but this change will require some way of tagging the lazy RCU
1127 * callbacks in the list of pending callbacks. Until then, this
1128 * function may only be called from __kfree_rcu().
1129 *
1130 * Because there is no preemptible RCU, we use RCU-sched instead.
1131 */
1132void kfree_call_rcu(struct rcu_head *head,
1133 void (*func)(struct rcu_head *rcu))
1134{
1135 __call_rcu(head, func, &rcu_sched_state, 1);
1136}
1137EXPORT_SYMBOL_GPL(kfree_call_rcu);
1138
1139/*
1064 * Wait for an rcu-preempt grace period, but make it happen quickly. 1140 * Wait for an rcu-preempt grace period, but make it happen quickly.
1065 * But because preemptible RCU does not exist, map to rcu-sched. 1141 * But because preemptible RCU does not exist, map to rcu-sched.
1066 */ 1142 */
@@ -1093,9 +1169,9 @@ static int rcu_preempt_pending(int cpu)
1093} 1169}
1094 1170
1095/* 1171/*
1096 * Because preemptible RCU does not exist, it never needs any CPU. 1172 * Because preemptible RCU does not exist, it never has callbacks.
1097 */ 1173 */
1098static int rcu_preempt_needs_cpu(int cpu) 1174static int rcu_preempt_cpu_has_callbacks(int cpu)
1099{ 1175{
1100 return 0; 1176 return 0;
1101} 1177}
@@ -1119,9 +1195,9 @@ static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
1119} 1195}
1120 1196
1121/* 1197/*
1122 * Because there is no preemptible RCU, there are no callbacks to move. 1198 * Because there is no preemptible RCU, there is no cleanup to do.
1123 */ 1199 */
1124static void rcu_preempt_send_cbs_to_online(void) 1200static void rcu_preempt_cleanup_dying_cpu(void)
1125{ 1201{
1126} 1202}
1127 1203
@@ -1823,132 +1899,6 @@ static void __cpuinit rcu_prepare_kthreads(int cpu)
1823 1899
1824#endif /* #else #ifdef CONFIG_RCU_BOOST */ 1900#endif /* #else #ifdef CONFIG_RCU_BOOST */
1825 1901
1826#ifndef CONFIG_SMP
1827
1828void synchronize_sched_expedited(void)
1829{
1830 cond_resched();
1831}
1832EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
1833
1834#else /* #ifndef CONFIG_SMP */
1835
1836static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
1837static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
1838
1839static int synchronize_sched_expedited_cpu_stop(void *data)
1840{
1841 /*
1842 * There must be a full memory barrier on each affected CPU
1843 * between the time that try_stop_cpus() is called and the
1844 * time that it returns.
1845 *
1846 * In the current initial implementation of cpu_stop, the
1847 * above condition is already met when the control reaches
1848 * this point and the following smp_mb() is not strictly
1849 * necessary. Do smp_mb() anyway for documentation and
1850 * robustness against future implementation changes.
1851 */
1852 smp_mb(); /* See above comment block. */
1853 return 0;
1854}
1855
1856/*
1857 * Wait for an rcu-sched grace period to elapse, but use "big hammer"
1858 * approach to force grace period to end quickly. This consumes
1859 * significant time on all CPUs, and is thus not recommended for
1860 * any sort of common-case code.
1861 *
1862 * Note that it is illegal to call this function while holding any
1863 * lock that is acquired by a CPU-hotplug notifier. Failing to
1864 * observe this restriction will result in deadlock.
1865 *
1866 * This implementation can be thought of as an application of ticket
1867 * locking to RCU, with sync_sched_expedited_started and
1868 * sync_sched_expedited_done taking on the roles of the halves
1869 * of the ticket-lock word. Each task atomically increments
1870 * sync_sched_expedited_started upon entry, snapshotting the old value,
1871 * then attempts to stop all the CPUs. If this succeeds, then each
1872 * CPU will have executed a context switch, resulting in an RCU-sched
1873 * grace period. We are then done, so we use atomic_cmpxchg() to
1874 * update sync_sched_expedited_done to match our snapshot -- but
1875 * only if someone else has not already advanced past our snapshot.
1876 *
1877 * On the other hand, if try_stop_cpus() fails, we check the value
1878 * of sync_sched_expedited_done. If it has advanced past our
1879 * initial snapshot, then someone else must have forced a grace period
1880 * some time after we took our snapshot. In this case, our work is
1881 * done for us, and we can simply return. Otherwise, we try again,
1882 * but keep our initial snapshot for purposes of checking for someone
1883 * doing our work for us.
1884 *
1885 * If we fail too many times in a row, we fall back to synchronize_sched().
1886 */
1887void synchronize_sched_expedited(void)
1888{
1889 int firstsnap, s, snap, trycount = 0;
1890
1891 /* Note that atomic_inc_return() implies full memory barrier. */
1892 firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
1893 get_online_cpus();
1894
1895 /*
1896 * Each pass through the following loop attempts to force a
1897 * context switch on each CPU.
1898 */
1899 while (try_stop_cpus(cpu_online_mask,
1900 synchronize_sched_expedited_cpu_stop,
1901 NULL) == -EAGAIN) {
1902 put_online_cpus();
1903
1904 /* No joy, try again later. Or just synchronize_sched(). */
1905 if (trycount++ < 10)
1906 udelay(trycount * num_online_cpus());
1907 else {
1908 synchronize_sched();
1909 return;
1910 }
1911
1912 /* Check to see if someone else did our work for us. */
1913 s = atomic_read(&sync_sched_expedited_done);
1914 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
1915 smp_mb(); /* ensure test happens before caller kfree */
1916 return;
1917 }
1918
1919 /*
1920 * Refetching sync_sched_expedited_started allows later
1921 * callers to piggyback on our grace period. We subtract
1922 * 1 to get the same token that the last incrementer got.
1923 * We retry after they started, so our grace period works
1924 * for them, and they started after our first try, so their
1925 * grace period works for us.
1926 */
1927 get_online_cpus();
1928 snap = atomic_read(&sync_sched_expedited_started);
1929 smp_mb(); /* ensure read is before try_stop_cpus(). */
1930 }
1931
1932 /*
1933 * Everyone up to our most recent fetch is covered by our grace
1934 * period. Update the counter, but only if our work is still
1935 * relevant -- which it won't be if someone who started later
1936 * than we did beat us to the punch.
1937 */
1938 do {
1939 s = atomic_read(&sync_sched_expedited_done);
1940 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
1941 smp_mb(); /* ensure test happens before caller kfree */
1942 break;
1943 }
1944 } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
1945
1946 put_online_cpus();
1947}
1948EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
1949
1950#endif /* #else #ifndef CONFIG_SMP */
1951
1952#if !defined(CONFIG_RCU_FAST_NO_HZ) 1902#if !defined(CONFIG_RCU_FAST_NO_HZ)
1953 1903
1954/* 1904/*
@@ -1981,7 +1931,7 @@ static void rcu_cleanup_after_idle(int cpu)
1981} 1931}
1982 1932
1983/* 1933/*
1984 * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=y, 1934 * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=n,
1985 * is nothing. 1935 * is nothing.
1986 */ 1936 */
1987static void rcu_prepare_for_idle(int cpu) 1937static void rcu_prepare_for_idle(int cpu)
@@ -2015,6 +1965,9 @@ static void rcu_prepare_for_idle(int cpu)
2015 * number, be warned: Setting RCU_IDLE_GP_DELAY too high can hang your 1965 * number, be warned: Setting RCU_IDLE_GP_DELAY too high can hang your
2016 * system. And if you are -that- concerned about energy efficiency, 1966 * system. And if you are -that- concerned about energy efficiency,
2017 * just power the system down and be done with it! 1967 * just power the system down and be done with it!
1968 * RCU_IDLE_LAZY_GP_DELAY gives the number of jiffies that a CPU is
1969 * permitted to sleep in dyntick-idle mode with only lazy RCU
1970 * callbacks pending. Setting this too high can OOM your system.
2018 * 1971 *
2019 * The values below work well in practice. If future workloads require 1972 * The values below work well in practice. If future workloads require
2020 * adjustment, they can be converted into kernel config parameters, though 1973 * adjustment, they can be converted into kernel config parameters, though
@@ -2023,11 +1976,13 @@ static void rcu_prepare_for_idle(int cpu)
2023#define RCU_IDLE_FLUSHES 5 /* Number of dyntick-idle tries. */ 1976#define RCU_IDLE_FLUSHES 5 /* Number of dyntick-idle tries. */
2024#define RCU_IDLE_OPT_FLUSHES 3 /* Optional dyntick-idle tries. */ 1977#define RCU_IDLE_OPT_FLUSHES 3 /* Optional dyntick-idle tries. */
2025#define RCU_IDLE_GP_DELAY 6 /* Roughly one grace period. */ 1978#define RCU_IDLE_GP_DELAY 6 /* Roughly one grace period. */
1979#define RCU_IDLE_LAZY_GP_DELAY (6 * HZ) /* Roughly six seconds. */
2026 1980
2027static DEFINE_PER_CPU(int, rcu_dyntick_drain); 1981static DEFINE_PER_CPU(int, rcu_dyntick_drain);
2028static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff); 1982static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
2029static DEFINE_PER_CPU(struct hrtimer, rcu_idle_gp_timer); 1983static DEFINE_PER_CPU(struct hrtimer, rcu_idle_gp_timer);
2030static ktime_t rcu_idle_gp_wait; 1984static ktime_t rcu_idle_gp_wait; /* If some non-lazy callbacks. */
1985static ktime_t rcu_idle_lazy_gp_wait; /* If only lazy callbacks. */
2031 1986
2032/* 1987/*
2033 * Allow the CPU to enter dyntick-idle mode if either: (1) There are no 1988 * Allow the CPU to enter dyntick-idle mode if either: (1) There are no
@@ -2048,6 +2003,48 @@ int rcu_needs_cpu(int cpu)
2048} 2003}
2049 2004
2050/* 2005/*
2006 * Does the specified flavor of RCU have non-lazy callbacks pending on
2007 * the specified CPU? Both RCU flavor and CPU are specified by the
2008 * rcu_data structure.
2009 */
2010static bool __rcu_cpu_has_nonlazy_callbacks(struct rcu_data *rdp)
2011{
2012 return rdp->qlen != rdp->qlen_lazy;
2013}
2014
2015#ifdef CONFIG_TREE_PREEMPT_RCU
2016
2017/*
2018 * Are there non-lazy RCU-preempt callbacks? (There cannot be if there
2019 * is no RCU-preempt in the kernel.)
2020 */
2021static bool rcu_preempt_cpu_has_nonlazy_callbacks(int cpu)
2022{
2023 struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);
2024
2025 return __rcu_cpu_has_nonlazy_callbacks(rdp);
2026}
2027
2028#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
2029
2030static bool rcu_preempt_cpu_has_nonlazy_callbacks(int cpu)
2031{
2032 return 0;
2033}
2034
2035#endif /* else #ifdef CONFIG_TREE_PREEMPT_RCU */
2036
2037/*
2038 * Does any flavor of RCU have non-lazy callbacks on the specified CPU?
2039 */
2040static bool rcu_cpu_has_nonlazy_callbacks(int cpu)
2041{
2042 return __rcu_cpu_has_nonlazy_callbacks(&per_cpu(rcu_sched_data, cpu)) ||
2043 __rcu_cpu_has_nonlazy_callbacks(&per_cpu(rcu_bh_data, cpu)) ||
2044 rcu_preempt_cpu_has_nonlazy_callbacks(cpu);
2045}
2046
2047/*
2051 * Timer handler used to force CPU to start pushing its remaining RCU 2048 * Timer handler used to force CPU to start pushing its remaining RCU
2052 * callbacks in the case where it entered dyntick-idle mode with callbacks 2049 * callbacks in the case where it entered dyntick-idle mode with callbacks
2053 * pending. The handler doesn't really need to do anything because the 2050 * pending. The handler doesn't really need to do anything because the
@@ -2074,6 +2071,8 @@ static void rcu_prepare_for_idle_init(int cpu)
2074 unsigned int upj = jiffies_to_usecs(RCU_IDLE_GP_DELAY); 2071 unsigned int upj = jiffies_to_usecs(RCU_IDLE_GP_DELAY);
2075 2072
2076 rcu_idle_gp_wait = ns_to_ktime(upj * (u64)1000); 2073 rcu_idle_gp_wait = ns_to_ktime(upj * (u64)1000);
2074 upj = jiffies_to_usecs(RCU_IDLE_LAZY_GP_DELAY);
2075 rcu_idle_lazy_gp_wait = ns_to_ktime(upj * (u64)1000);
2077 firsttime = 0; 2076 firsttime = 0;
2078 } 2077 }
2079} 2078}
@@ -2109,10 +2108,6 @@ static void rcu_cleanup_after_idle(int cpu)
2109 */ 2108 */
2110static void rcu_prepare_for_idle(int cpu) 2109static void rcu_prepare_for_idle(int cpu)
2111{ 2110{
2112 unsigned long flags;
2113
2114 local_irq_save(flags);
2115
2116 /* 2111 /*
2117 * If there are no callbacks on this CPU, enter dyntick-idle mode. 2112 * If there are no callbacks on this CPU, enter dyntick-idle mode.
2118 * Also reset state to avoid prejudicing later attempts. 2113 * Also reset state to avoid prejudicing later attempts.
@@ -2120,7 +2115,6 @@ static void rcu_prepare_for_idle(int cpu)
2120 if (!rcu_cpu_has_callbacks(cpu)) { 2115 if (!rcu_cpu_has_callbacks(cpu)) {
2121 per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1; 2116 per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
2122 per_cpu(rcu_dyntick_drain, cpu) = 0; 2117 per_cpu(rcu_dyntick_drain, cpu) = 0;
2123 local_irq_restore(flags);
2124 trace_rcu_prep_idle("No callbacks"); 2118 trace_rcu_prep_idle("No callbacks");
2125 return; 2119 return;
2126 } 2120 }
@@ -2130,7 +2124,6 @@ static void rcu_prepare_for_idle(int cpu)
2130 * refrained from disabling the scheduling-clock tick. 2124 * refrained from disabling the scheduling-clock tick.
2131 */ 2125 */
2132 if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies) { 2126 if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies) {
2133 local_irq_restore(flags);
2134 trace_rcu_prep_idle("In holdoff"); 2127 trace_rcu_prep_idle("In holdoff");
2135 return; 2128 return;
2136 } 2129 }
@@ -2140,18 +2133,22 @@ static void rcu_prepare_for_idle(int cpu)
2140 /* First time through, initialize the counter. */ 2133 /* First time through, initialize the counter. */
2141 per_cpu(rcu_dyntick_drain, cpu) = RCU_IDLE_FLUSHES; 2134 per_cpu(rcu_dyntick_drain, cpu) = RCU_IDLE_FLUSHES;
2142 } else if (per_cpu(rcu_dyntick_drain, cpu) <= RCU_IDLE_OPT_FLUSHES && 2135 } else if (per_cpu(rcu_dyntick_drain, cpu) <= RCU_IDLE_OPT_FLUSHES &&
2143 !rcu_pending(cpu)) { 2136 !rcu_pending(cpu) &&
2137 !local_softirq_pending()) {
2144 /* Can we go dyntick-idle despite still having callbacks? */ 2138 /* Can we go dyntick-idle despite still having callbacks? */
2145 trace_rcu_prep_idle("Dyntick with callbacks"); 2139 trace_rcu_prep_idle("Dyntick with callbacks");
2146 per_cpu(rcu_dyntick_drain, cpu) = 0; 2140 per_cpu(rcu_dyntick_drain, cpu) = 0;
2147 per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1; 2141 per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
2148 hrtimer_start(&per_cpu(rcu_idle_gp_timer, cpu), 2142 if (rcu_cpu_has_nonlazy_callbacks(cpu))
2149 rcu_idle_gp_wait, HRTIMER_MODE_REL); 2143 hrtimer_start(&per_cpu(rcu_idle_gp_timer, cpu),
2144 rcu_idle_gp_wait, HRTIMER_MODE_REL);
2145 else
2146 hrtimer_start(&per_cpu(rcu_idle_gp_timer, cpu),
2147 rcu_idle_lazy_gp_wait, HRTIMER_MODE_REL);
2150 return; /* Nothing more to do immediately. */ 2148 return; /* Nothing more to do immediately. */
2151 } else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) { 2149 } else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
2152 /* We have hit the limit, so time to give up. */ 2150 /* We have hit the limit, so time to give up. */
2153 per_cpu(rcu_dyntick_holdoff, cpu) = jiffies; 2151 per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
2154 local_irq_restore(flags);
2155 trace_rcu_prep_idle("Begin holdoff"); 2152 trace_rcu_prep_idle("Begin holdoff");
2156 invoke_rcu_core(); /* Force the CPU out of dyntick-idle. */ 2153 invoke_rcu_core(); /* Force the CPU out of dyntick-idle. */
2157 return; 2154 return;
@@ -2163,23 +2160,17 @@ static void rcu_prepare_for_idle(int cpu)
2163 */ 2160 */
2164#ifdef CONFIG_TREE_PREEMPT_RCU 2161#ifdef CONFIG_TREE_PREEMPT_RCU
2165 if (per_cpu(rcu_preempt_data, cpu).nxtlist) { 2162 if (per_cpu(rcu_preempt_data, cpu).nxtlist) {
2166 local_irq_restore(flags);
2167 rcu_preempt_qs(cpu); 2163 rcu_preempt_qs(cpu);
2168 force_quiescent_state(&rcu_preempt_state, 0); 2164 force_quiescent_state(&rcu_preempt_state, 0);
2169 local_irq_save(flags);
2170 } 2165 }
2171#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ 2166#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
2172 if (per_cpu(rcu_sched_data, cpu).nxtlist) { 2167 if (per_cpu(rcu_sched_data, cpu).nxtlist) {
2173 local_irq_restore(flags);
2174 rcu_sched_qs(cpu); 2168 rcu_sched_qs(cpu);
2175 force_quiescent_state(&rcu_sched_state, 0); 2169 force_quiescent_state(&rcu_sched_state, 0);
2176 local_irq_save(flags);
2177 } 2170 }
2178 if (per_cpu(rcu_bh_data, cpu).nxtlist) { 2171 if (per_cpu(rcu_bh_data, cpu).nxtlist) {
2179 local_irq_restore(flags);
2180 rcu_bh_qs(cpu); 2172 rcu_bh_qs(cpu);
2181 force_quiescent_state(&rcu_bh_state, 0); 2173 force_quiescent_state(&rcu_bh_state, 0);
2182 local_irq_save(flags);
2183 } 2174 }
2184 2175
2185 /* 2176 /*
@@ -2187,13 +2178,124 @@ static void rcu_prepare_for_idle(int cpu)
2187 * So try forcing the callbacks through the grace period. 2178 * So try forcing the callbacks through the grace period.
2188 */ 2179 */
2189 if (rcu_cpu_has_callbacks(cpu)) { 2180 if (rcu_cpu_has_callbacks(cpu)) {
2190 local_irq_restore(flags);
2191 trace_rcu_prep_idle("More callbacks"); 2181 trace_rcu_prep_idle("More callbacks");
2192 invoke_rcu_core(); 2182 invoke_rcu_core();
2193 } else { 2183 } else
2194 local_irq_restore(flags);
2195 trace_rcu_prep_idle("Callbacks drained"); 2184 trace_rcu_prep_idle("Callbacks drained");
2196 }
2197} 2185}
2198 2186
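Putting the last two hunks together, the idle-entry decision now runs roughly as follows (a condensed restatement for orientation, not kernel code):

	/*
	 * rcu_prepare_for_idle(), condensed:
	 *
	 *  1. No callbacks at all?      Enter dyntick-idle; reset state.
	 *  2. In holdoff this jiffy?    Keep the tick; try again later.
	 *  3. Inside the optional flush budget with no RCU or softirq
	 *     work pending?             Sleep now, but arm a timer:
	 *       - any non-lazy callbacks -> rcu_idle_gp_wait
	 *                                   (roughly one grace period);
	 *       - only lazy callbacks    -> rcu_idle_lazy_gp_wait
	 *                                   (roughly six seconds; only
	 *                                    memory is waiting).
	 *  4. Flush budget exhausted?   Enter holdoff; kick the RCU core.
	 *  5. Otherwise, force a quiescent state for each flavor that
	 *     still has callbacks and loop back through the drain.
	 */
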
2199#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */ 2187#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
2188
2189#ifdef CONFIG_RCU_CPU_STALL_INFO
2190
2191#ifdef CONFIG_RCU_FAST_NO_HZ
2192
2193static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
2194{
2195 struct hrtimer *hrtp = &per_cpu(rcu_idle_gp_timer, cpu);
2196
2197 sprintf(cp, "drain=%d %c timer=%lld",
2198 per_cpu(rcu_dyntick_drain, cpu),
2199 per_cpu(rcu_dyntick_holdoff, cpu) == jiffies ? 'H' : '.',
2200 hrtimer_active(hrtp)
2201 ? ktime_to_us(hrtimer_get_remaining(hrtp))
2202 : -1);
2203}
2204
2205#else /* #ifdef CONFIG_RCU_FAST_NO_HZ */
2206
2207static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
2208{
2209}
2210
2211#endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */
2212
2213/* Initiate the stall-info list. */
2214static void print_cpu_stall_info_begin(void)
2215{
2216 printk(KERN_CONT "\n");
2217}
2218
2219/*
2220 * Print out diagnostic information for the specified stalled CPU.
2221 *
2222 * If the specified CPU is aware of the current RCU grace period
2223 * (flavor specified by rsp), then print the number of scheduling
2224 * clock interrupts the CPU has taken during the time that it has
2225 * been aware. Otherwise, print the number of RCU grace periods
2226 * that this CPU is ignorant of, for example, "1" if the CPU was
2227 * aware of the previous grace period.
2228 *
2229 * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info.
2230 */
2231static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
2232{
2233 char fast_no_hz[72];
2234 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
2235 struct rcu_dynticks *rdtp = rdp->dynticks;
2236 char *ticks_title;
2237 unsigned long ticks_value;
2238
2239 if (rsp->gpnum == rdp->gpnum) {
2240 ticks_title = "ticks this GP";
2241 ticks_value = rdp->ticks_this_gp;
2242 } else {
2243 ticks_title = "GPs behind";
2244 ticks_value = rsp->gpnum - rdp->gpnum;
2245 }
2246 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
2247 printk(KERN_ERR "\t%d: (%lu %s) idle=%03x/%llx/%d %s\n",
2248 cpu, ticks_value, ticks_title,
2249 atomic_read(&rdtp->dynticks) & 0xfff,
2250 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
2251 fast_no_hz);
2252}
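
Substituting values into the two format strings above (all numbers invented for illustration), a stalled CPU 3 that has taken 2500 scheduling-clock interrupts during the current grace period, is not in dyntick-idle holdoff, and has no idle timer pending would be reported along the lines of:

        3: (2500 ticks this GP) idle=c52/1/0 drain=0 . timer=-1

The idle= triple shows the low 12 bits of the dynticks counter, then the dynticks nesting count, then the NMI nesting count.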
2253
2254/* Terminate the stall-info list. */
2255static void print_cpu_stall_info_end(void)
2256{
2257 printk(KERN_ERR "\t");
2258}
2259
2260/* Zero ->ticks_this_gp for all flavors of RCU. */
2261static void zero_cpu_stall_ticks(struct rcu_data *rdp)
2262{
2263 rdp->ticks_this_gp = 0;
2264}
2265
2266/* Increment ->ticks_this_gp for all flavors of RCU. */
2267static void increment_cpu_stall_ticks(void)
2268{
2269 __get_cpu_var(rcu_sched_data).ticks_this_gp++;
2270 __get_cpu_var(rcu_bh_data).ticks_this_gp++;
2271#ifdef CONFIG_TREE_PREEMPT_RCU
2272 __get_cpu_var(rcu_preempt_data).ticks_this_gp++;
2273#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
2274}
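
These two helpers presuppose call sites that this hunk does not show: the count must be zeroed when a CPU notices a new grace period and incremented on every scheduling-clock interrupt. A sketch of the assumed wiring (both function names below are hypothetical):

        /* When this CPU notices a new grace period: */
        static void note_new_gp(struct rcu_data *rdp)
        {
                zero_cpu_stall_ticks(rdp);      /* restart "ticks this GP" */
        }

        /* From the scheduling-clock interrupt path: */
        static void sched_clock_tick_hook(void)
        {
                increment_cpu_stall_ticks();
        }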
2275
2276#else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */
2277
2278static void print_cpu_stall_info_begin(void)
2279{
2280 printk(KERN_CONT " {");
2281}
2282
2283static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
2284{
2285 printk(KERN_CONT " %d", cpu);
2286}
2287
2288static void print_cpu_stall_info_end(void)
2289{
2290 printk(KERN_CONT "} ");
2291}
2292
2293static void zero_cpu_stall_ticks(struct rcu_data *rdp)
2294{
2295}
2296
2297static void increment_cpu_stall_ticks(void)
2298{
2299}
2300
2301#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */
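
Because the CONFIG_RCU_CPU_STALL_INFO=n stubs above keep the same begin/info/end signatures, the stall-warning code can call them unconditionally and simply get the old terse "{ 0 1 }" CPU list when the option is off. A sketch of the expected calling sequence, modeled on the existing print_other_cpu_stall() (the stall predicate below is hypothetical):

        print_cpu_stall_info_begin();
        for_each_possible_cpu(cpu)
                if (cpu_is_stalled(rsp, cpu))
                        print_cpu_stall_info(rsp, cpu);
        print_cpu_stall_info_end();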
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
index 654cfe67f0d1..ed459edeff43 100644
--- a/kernel/rcutree_trace.c
+++ b/kernel/rcutree_trace.c
@@ -72,9 +72,9 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
72 rdp->dynticks->dynticks_nesting, 72 rdp->dynticks->dynticks_nesting,
73 rdp->dynticks->dynticks_nmi_nesting, 73 rdp->dynticks->dynticks_nmi_nesting,
74 rdp->dynticks_fqs); 74 rdp->dynticks_fqs);
75 seq_printf(m, " of=%lu ri=%lu", rdp->offline_fqs, rdp->resched_ipi); 75 seq_printf(m, " of=%lu", rdp->offline_fqs);
76 seq_printf(m, " ql=%ld qs=%c%c%c%c", 76 seq_printf(m, " ql=%ld/%ld qs=%c%c%c%c",
77 rdp->qlen, 77 rdp->qlen_lazy, rdp->qlen,
78 ".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] != 78 ".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] !=
79 rdp->nxttail[RCU_NEXT_TAIL]], 79 rdp->nxttail[RCU_NEXT_TAIL]],
80 ".R"[rdp->nxttail[RCU_WAIT_TAIL] != 80 ".R"[rdp->nxttail[RCU_WAIT_TAIL] !=
@@ -144,8 +144,8 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
144 rdp->dynticks->dynticks_nesting, 144 rdp->dynticks->dynticks_nesting,
145 rdp->dynticks->dynticks_nmi_nesting, 145 rdp->dynticks->dynticks_nmi_nesting,
146 rdp->dynticks_fqs); 146 rdp->dynticks_fqs);
147 seq_printf(m, ",%lu,%lu", rdp->offline_fqs, rdp->resched_ipi); 147 seq_printf(m, ",%lu", rdp->offline_fqs);
148 seq_printf(m, ",%ld,\"%c%c%c%c\"", rdp->qlen, 148 seq_printf(m, ",%ld,%ld,\"%c%c%c%c\"", rdp->qlen_lazy, rdp->qlen,
149 ".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] != 149 ".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] !=
150 rdp->nxttail[RCU_NEXT_TAIL]], 150 rdp->nxttail[RCU_NEXT_TAIL]],
151 ".R"[rdp->nxttail[RCU_WAIT_TAIL] != 151 ".R"[rdp->nxttail[RCU_WAIT_TAIL] !=
@@ -168,7 +168,7 @@ static int show_rcudata_csv(struct seq_file *m, void *unused)
168{ 168{
169 seq_puts(m, "\"CPU\",\"Online?\",\"c\",\"g\",\"pq\",\"pgp\",\"pq\","); 169 seq_puts(m, "\"CPU\",\"Online?\",\"c\",\"g\",\"pq\",\"pgp\",\"pq\",");
170 seq_puts(m, "\"dt\",\"dt nesting\",\"dt NMI nesting\",\"df\","); 170 seq_puts(m, "\"dt\",\"dt nesting\",\"dt NMI nesting\",\"df\",");
171 seq_puts(m, "\"of\",\"ri\",\"ql\",\"qs\""); 171 seq_puts(m, "\"of\",\"qll\",\"ql\",\"qs\"");
172#ifdef CONFIG_RCU_BOOST 172#ifdef CONFIG_RCU_BOOST
173 seq_puts(m, "\"kt\",\"ktl\""); 173 seq_puts(m, "\"kt\",\"ktl\"");
174#endif /* #ifdef CONFIG_RCU_BOOST */ 174#endif /* #ifdef CONFIG_RCU_BOOST */
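
The rcutree_trace.c hunks drop the resched_ipi ("ri") field from the debugfs output and report the callback queue as ql=<lazy>/<total>, so lazy kfree_rcu()-style callbacks become visible separately. A hypothetical fragment of the resulting rcudata line (counts and flag characters invented for illustration):

        of=0 ql=12/340 qs=NRWD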
diff --git a/kernel/srcu.c b/kernel/srcu.c
index 0febf61e1aa3..ba35f3a4a1f4 100644
--- a/kernel/srcu.c
+++ b/kernel/srcu.c
@@ -172,6 +172,12 @@ static void __synchronize_srcu(struct srcu_struct *sp, void (*sync_func)(void))
172{ 172{
173 int idx; 173 int idx;
174 174
175 rcu_lockdep_assert(!lock_is_held(&sp->dep_map) &&
176 !lock_is_held(&rcu_bh_lock_map) &&
177 !lock_is_held(&rcu_lock_map) &&
178 !lock_is_held(&rcu_sched_lock_map),
179 "Illegal synchronize_srcu() in same-type SRCU (or RCU) read-side critical section");
180
175 idx = sp->completed; 181 idx = sp->completed;
176 mutex_lock(&sp->mutex); 182 mutex_lock(&sp->mutex);
177 183
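
The new rcu_lockdep_assert() teaches lockdep-enabled kernels to flag a synchronous grace-period wait issued from inside a same-type read-side critical section, a bug that otherwise deadlocks silently. A sketch of the misuse it catches (my_srcu is a placeholder srcu_struct):

        int idx;

        idx = srcu_read_lock(&my_srcu);
        synchronize_srcu(&my_srcu);     /* waits for our own reader: deadlock */
        srcu_read_unlock(&my_srcu, idx);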
@@ -280,19 +286,26 @@ void synchronize_srcu(struct srcu_struct *sp)
280EXPORT_SYMBOL_GPL(synchronize_srcu); 286EXPORT_SYMBOL_GPL(synchronize_srcu);
281 287
282/** 288/**
283 * synchronize_srcu_expedited - like synchronize_srcu, but less patient 289 * synchronize_srcu_expedited - Brute-force SRCU grace period
284 * @sp: srcu_struct with which to synchronize. 290 * @sp: srcu_struct with which to synchronize.
285 * 291 *
286 * Flip the completed counter, and wait for the old count to drain to zero. 292 * Wait for an SRCU grace period to elapse, but use a "big hammer"
287 * As with classic RCU, the updater must use some separate means of 293 * approach to force the grace period to end quickly. This consumes
288 * synchronizing concurrent updates. Can block; must be called from 294 * significant time on all CPUs and is unfriendly to real-time workloads,
289 * process context. 295 * and is thus not recommended for any sort of common-case code. In fact,
296 * if you are using synchronize_srcu_expedited() in a loop, please
297 * restructure your code to batch your updates, and then use a single
298 * synchronize_srcu() instead.
290 * 299 *
291 * Note that it is illegal to call synchronize_srcu_expedited() 300 * Note that it is illegal to call this function while holding any lock
292 * from the corresponding SRCU read-side critical section; doing so 301 * that is acquired by a CPU-hotplug notifier. And yes, it is also illegal
293 * will result in deadlock. However, it is perfectly legal to call 302 * to call this function from a CPU-hotplug notifier. Failing to observe
294 * synchronize_srcu_expedited() on one srcu_struct 303 * these restrictions will result in deadlock. It is also illegal to call
295 * srcu_struct's read-side critical section. 304 * synchronize_srcu_expedited() from the corresponding SRCU read-side
305 * critical section; doing so will result in deadlock. However, it is
306 * perfectly legal to call synchronize_srcu_expedited() on one srcu_struct
307 * from some other srcu_struct's read-side critical section, as long as
308 * the resulting graph of srcu_structs is acyclic.
296 */ 309 */
297void synchronize_srcu_expedited(struct srcu_struct *sp) 310void synchronize_srcu_expedited(struct srcu_struct *sp)
298{ 311{
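
The rewritten kernel-doc above tells loop-heavy updaters to batch their changes behind a single ordinary grace period rather than paying for an expedited one per update. In outline (update_item(), n, and my_srcu are placeholders):

        /* Unfriendly: one expedited grace period per update. */
        for (i = 0; i < n; i++) {
                update_item(i);
                synchronize_srcu_expedited(&my_srcu);
        }

        /* Preferred: batch the updates, then wait once. */
        for (i = 0; i < n; i++)
                update_item(i);
        synchronize_srcu(&my_srcu);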
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 8745ac7d1f75..d27a2aa3e815 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -927,6 +927,30 @@ config RCU_CPU_STALL_VERBOSE
927 927
928 Say Y if you want to enable such checks. 928 Say Y if you want to enable such checks.
929 929
930config RCU_CPU_STALL_INFO
931 bool "Print additional diagnostics on RCU CPU stall"
932 depends on (TREE_RCU || TREE_PREEMPT_RCU) && DEBUG_KERNEL
933 default n
934 help
935 For each stalled CPU that is aware of the current RCU grace
936 period, print out additional per-CPU diagnostic information
937 regarding scheduling-clock ticks, idle state, and,
938 for RCU_FAST_NO_HZ kernels, idle-entry state.
939
940 Say N if you are unsure.
941
942 Say Y if you want to enable such diagnostics.
943
944config RCU_TRACE
945 bool "Enable tracing for RCU"
946 depends on DEBUG_KERNEL
947 help
948 This option provides tracing for RCU, which presents stats
949 in debugfs for debugging the RCU implementation.
950
951 Say Y here if you want to enable RCU tracing.
952 Say N if you are unsure.
953
930config KPROBES_SANITY_TEST 954config KPROBES_SANITY_TEST
931 bool "Kprobes sanity tests" 955 bool "Kprobes sanity tests"
932 depends on DEBUG_KERNEL 956 depends on DEBUG_KERNEL
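
Both new Kconfig entries depend on DEBUG_KERNEL, and RCU_CPU_STALL_INFO additionally requires one of the tree RCU implementations. A hypothetical .config fragment enabling the new diagnostics on a TREE_RCU build:

        CONFIG_DEBUG_KERNEL=y
        CONFIG_TREE_RCU=y
        CONFIG_RCU_CPU_STALL_INFO=y
        CONFIG_RCU_TRACE=y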
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 86f3b885b4f3..c48adc565e92 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -1857,11 +1857,6 @@ static int cipso_v4_genopt(unsigned char *buf, u32 buf_len,
1857 return CIPSO_V4_HDR_LEN + ret_val; 1857 return CIPSO_V4_HDR_LEN + ret_val;
1858} 1858}
1859 1859
1860static void opt_kfree_rcu(struct rcu_head *head)
1861{
1862 kfree(container_of(head, struct ip_options_rcu, rcu));
1863}
1864
1865/** 1860/**
1866 * cipso_v4_sock_setattr - Add a CIPSO option to a socket 1861 * cipso_v4_sock_setattr - Add a CIPSO option to a socket
1867 * @sk: the socket 1862 * @sk: the socket
@@ -1938,7 +1933,7 @@ int cipso_v4_sock_setattr(struct sock *sk,
1938 } 1933 }
1939 rcu_assign_pointer(sk_inet->inet_opt, opt); 1934 rcu_assign_pointer(sk_inet->inet_opt, opt);
1940 if (old) 1935 if (old)
1941 call_rcu(&old->rcu, opt_kfree_rcu); 1936 kfree_rcu(old, rcu);
1942 1937
1943 return 0; 1938 return 0;
1944 1939
@@ -2005,7 +2000,7 @@ int cipso_v4_req_setattr(struct request_sock *req,
2005 req_inet = inet_rsk(req); 2000 req_inet = inet_rsk(req);
2006 opt = xchg(&req_inet->opt, opt); 2001 opt = xchg(&req_inet->opt, opt);
2007 if (opt) 2002 if (opt)
2008 call_rcu(&opt->rcu, opt_kfree_rcu); 2003 kfree_rcu(opt, rcu);
2009 2004
2010 return 0; 2005 return 0;
2011 2006
@@ -2075,7 +2070,7 @@ static int cipso_v4_delopt(struct ip_options_rcu **opt_ptr)
2075 * remove the entire option struct */ 2070 * remove the entire option struct */
2076 *opt_ptr = NULL; 2071 *opt_ptr = NULL;
2077 hdr_delta = opt->opt.optlen; 2072 hdr_delta = opt->opt.optlen;
2078 call_rcu(&opt->rcu, opt_kfree_rcu); 2073 kfree_rcu(opt, rcu);
2079 } 2074 }
2080 2075
2081 return hdr_delta; 2076 return hdr_delta;
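
This conversion, like the ip_sockglue.c and mesh_pathtbl.c hunks that follow, is the same mechanical simplification: when a structure embeds a struct rcu_head and its RCU callback does nothing but kfree() the enclosing object, the wrapper can be deleted in favor of kfree_rcu(), which takes the pointer and the name of the rcu_head member. In outline:

        struct ip_options_rcu {
                struct rcu_head rcu;    /* embedded rcu_head named "rcu" */
                /* ... option payload ... */
        };

        /* Before: an explicit callback that only frees the struct. */
        call_rcu(&old->rcu, opt_kfree_rcu);

        /* After: the core does the container_of() and kfree() itself. */
        kfree_rcu(old, rcu);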
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 8aa87c19fa00..5343d9ac510b 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -445,11 +445,6 @@ out:
445} 445}
446 446
447 447
448static void opt_kfree_rcu(struct rcu_head *head)
449{
450 kfree(container_of(head, struct ip_options_rcu, rcu));
451}
452
453/* 448/*
454 * Socket option code for IP. This is the end of the line after any 449 * Socket option code for IP. This is the end of the line after any
455 * TCP,UDP etc options on an IP socket. 450 * TCP,UDP etc options on an IP socket.
@@ -525,7 +520,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
525 } 520 }
526 rcu_assign_pointer(inet->inet_opt, opt); 521 rcu_assign_pointer(inet->inet_opt, opt);
527 if (old) 522 if (old)
528 call_rcu(&old->rcu, opt_kfree_rcu); 523 kfree_rcu(old, rcu);
529 break; 524 break;
530 } 525 }
531 case IP_PKTINFO: 526 case IP_PKTINFO:
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index edf167e3b8f3..30420bc1f699 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -413,12 +413,6 @@ struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data
413 return NULL; 413 return NULL;
414} 414}
415 415
416static void mesh_gate_node_reclaim(struct rcu_head *rp)
417{
418 struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
419 kfree(node);
420}
421
422/** 416/**
423 * mesh_path_add_gate - add the given mpath to a mesh gate to our path table 417 * mesh_path_add_gate - add the given mpath to a mesh gate to our path table
424 * @mpath: gate path to add to table 418 * @mpath: gate path to add to table
@@ -479,7 +473,7 @@ static int mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
479 if (gate->mpath == mpath) { 473 if (gate->mpath == mpath) {
480 spin_lock_bh(&tbl->gates_lock); 474 spin_lock_bh(&tbl->gates_lock);
481 hlist_del_rcu(&gate->list); 475 hlist_del_rcu(&gate->list);
482 call_rcu(&gate->rcu, mesh_gate_node_reclaim); 476 kfree_rcu(gate, rcu);
483 spin_unlock_bh(&tbl->gates_lock); 477 spin_unlock_bh(&tbl->gates_lock);
484 mpath->sdata->u.mesh.num_gates--; 478 mpath->sdata->u.mesh.num_gates--;
485 mpath->is_gate = false; 479 mpath->is_gate = false;