-rw-r--r--  Documentation/RCU/RTFP.txt          | 858
-rw-r--r--  Documentation/RCU/rcubarrier.txt    |  12
-rw-r--r--  Documentation/RCU/torture.txt       |  10
-rw-r--r--  Documentation/memory-barriers.txt   |  10
-rw-r--r--  Documentation/timers/NO_HZ.txt      |  44
-rw-r--r--  include/asm-generic/vmlinux.lds.h   |   7
-rw-r--r--  include/linux/debugobjects.h        |   6
-rw-r--r--  include/linux/ftrace_event.h        |  34
-rw-r--r--  include/linux/jiffies.h             |   8
-rw-r--r--  include/linux/rculist.h             |   5
-rw-r--r--  include/linux/rcupdate.h            |  26
-rw-r--r--  include/trace/events/rcu.h          |  82
-rw-r--r--  init/Kconfig                        |   1
-rw-r--r--  kernel/rcu.h                        |  12
-rw-r--r--  kernel/rcupdate.c                   | 102
-rw-r--r--  kernel/rcutiny.c                    |   2
-rw-r--r--  kernel/rcutiny_plugin.h             |   2
-rw-r--r--  kernel/rcutorture.c                 | 396
-rw-r--r--  kernel/rcutree.c                    | 255
-rw-r--r--  kernel/rcutree.h                    |  19
-rw-r--r--  kernel/rcutree_plugin.h             | 460
-rw-r--r--  kernel/time/Kconfig                 |  50
-rw-r--r--  kernel/trace/trace.h                |   3
-rw-r--r--  kernel/trace/trace_printk.c         |  19
-rw-r--r--  lib/debugobjects.c                  |  20
25 files changed, 1610 insertions, 833 deletions
diff --git a/Documentation/RCU/RTFP.txt b/Documentation/RCU/RTFP.txt
index 7f40c72a9c51..273e654d7d08 100644
--- a/Documentation/RCU/RTFP.txt
+++ b/Documentation/RCU/RTFP.txt
@@ -39,7 +39,7 @@ in read-mostly situations. This algorithm does take pains to avoid | |||
39 | write-side contention and parallelize the other write-side overheads by | 39 | write-side contention and parallelize the other write-side overheads by |
40 | providing a fine-grained locking design, however, it would be interesting | 40 | providing a fine-grained locking design, however, it would be interesting |
41 | to see how much of the performance advantage reported in 1990 remains | 41 | to see how much of the performance advantage reported in 1990 remains |
42 | in 2004. | 42 | today. |
43 | 43 | ||
44 | At about this same time, Adams [Adams91] described ``chaotic relaxation'', | 44 | At about this same time, Adams [Adams91] described ``chaotic relaxation'', |
45 | where the normal barriers between successive iterations of convergent | 45 | where the normal barriers between successive iterations of convergent |
@@ -86,9 +86,9 @@ DYNIX/ptx kernel. The corresponding conference paper appeared in 1998 | |||
86 | [McKenney98]. | 86 | [McKenney98]. |
87 | 87 | ||
88 | In 1999, the Tornado and K42 groups described their "generations" | 88 | In 1999, the Tornado and K42 groups described their "generations" |
89 | mechanism, which quite similar to RCU [Gamsa99]. These operating systems | 89 | mechanism, which is quite similar to RCU [Gamsa99]. These operating |
90 | made pervasive use of RCU in place of "existence locks", which greatly | 90 | systems made pervasive use of RCU in place of "existence locks", which |
91 | simplifies locking hierarchies. | 91 | greatly simplifies locking hierarchies and helps avoid deadlocks. |
92 | 92 | ||
93 | 2001 saw the first RCU presentation involving Linux [McKenney01a] | 93 | 2001 saw the first RCU presentation involving Linux [McKenney01a] |
94 | at OLS. The resulting abundance of RCU patches was presented the | 94 | at OLS. The resulting abundance of RCU patches was presented the |
@@ -106,8 +106,11 @@ these techniques still impose significant read-side overhead in the | |||
106 | form of memory barriers. Researchers at Sun worked along similar lines | 106 | form of memory barriers. Researchers at Sun worked along similar lines |
107 | in the same timeframe [HerlihyLM02]. These techniques can be thought | 107 | in the same timeframe [HerlihyLM02]. These techniques can be thought |
108 | of as inside-out reference counts, where the count is represented by the | 108 | of as inside-out reference counts, where the count is represented by the |
109 | number of hazard pointers referencing a given data structure (rather than | 109 | number of hazard pointers referencing a given data structure rather than |
110 | the more conventional counter field within the data structure itself). | 110 | the more conventional counter field within the data structure itself. |
111 | The key advantage of inside-out reference counts is that they can be | ||
112 | stored in immortal variables, thus allowing races between access and | ||
113 | deletion to be avoided. | ||
111 | 114 | ||
112 | By the same token, RCU can be thought of as a "bulk reference count", | 115 | By the same token, RCU can be thought of as a "bulk reference count", |
113 | where some form of reference counter covers all references by a given CPU | 116 | where some form of reference counter covers all references by a given CPU |
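
The hazard-pointer idea in the hunk above can be made concrete with a small sketch: each reader owns an immortal per-reader slot, the "count" on a structure is simply how many slots currently point at it, and the updater inspects those slots before freeing anything. The C11 sketch below assumes a single reader and uses invented names (global_ptr, hazard_ptr, hp_acquire(), hp_release(), hp_retire()); it illustrates the general technique only, not the specific algorithms in the papers cited above, and real implementations defer and batch the reclamation step rather than spinning.

#include <stdatomic.h>
#include <stdlib.h>

struct node {
        int data;
};

static _Atomic(struct node *) global_ptr;       /* shared pointer being protected */
static _Atomic(struct node *) hazard_ptr;       /* the single reader's immortal slot */

/* Reader: publish the pointer about to be used, then re-check that the
 * shared pointer did not change (and thus possibly get freed) in between. */
static struct node *hp_acquire(void)
{
        struct node *p;

        do {
                p = atomic_load(&global_ptr);
                atomic_store(&hazard_ptr, p);
        } while (p != atomic_load(&global_ptr));
        return p;               /* safe to dereference until hp_release() */
}

static void hp_release(void)
{
        atomic_store(&hazard_ptr, NULL);
}

/* Updater: after unlinking the old node from global_ptr, free it only once
 * no hazard pointer still references it.  (Illustrative spin; real code
 * defers the free and scans hazard pointers in batches.) */
static void hp_retire(struct node *old)
{
        while (atomic_load(&hazard_ptr) == old)
                ;
        free(old);
}
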
@@ -179,7 +182,25 @@ tree using software transactional memory to protect concurrent updates | |||
179 | (strange, but true!) [PhilHoward2011RCUTMRBTree], yet another variant of | 182 | (strange, but true!) [PhilHoward2011RCUTMRBTree], yet another variant of |
180 | RCU-protected resizeable hash tables [Triplett:2011:RPHash], the 3.0 RCU | 183 | RCU-protected resizeable hash tables [Triplett:2011:RPHash], the 3.0 RCU |
181 | trainwreck [PaulEMcKenney2011RCU3.0trainwreck], and Neil Brown's "Meet the | 184 | trainwreck [PaulEMcKenney2011RCU3.0trainwreck], and Neil Brown's "Meet the |
182 | Lockers" LWN article [NeilBrown2011MeetTheLockers]. | 185 | Lockers" LWN article [NeilBrown2011MeetTheLockers]. Some academic |
186 | work looked at debugging uses of RCU [Seyster:2011:RFA:2075416.2075425]. | ||
187 | |||
188 | In 2012, Josh Triplett received his Ph.D. with his dissertation | ||
189 | covering RCU-protected resizable hash tables and the relationship | ||
190 | between memory barriers and read-side traversal order: If the updater | ||
191 | is making changes in the opposite direction from the read-side traversal | ||
192 | order, the updater need only execute a memory-barrier instruction, | ||
193 | but if in the same direction, the updater needs to wait for a grace | ||
194 | period between the individual updates [JoshTriplettPhD]. Also in 2012, | ||
195 | after seventeen years of attempts, an RCU paper made it into a top-flight | ||
196 | academic journal, IEEE Transactions on Parallel and Distributed Systems | ||
197 | [MathieuDesnoyers2012URCU]. A group of researchers in Spain applied | ||
198 | user-level RCU to crowd simulation [GuillermoVigueras2012RCUCrowd], and | ||
199 | another group of researchers in Europe produced a formal description of | ||
200 | RCU based on separation logic [AlexeyGotsman2012VerifyGraceExtended], | ||
201 | which was published in the 2013 European Symposium on Programming | ||
202 | [AlexeyGotsman2013ESOPRCU]. | ||
203 | |||
183 | 204 | ||
184 | 205 | ||
185 | Bibtex Entries | 206 | Bibtex Entries |
@@ -193,13 +214,12 @@ Bibtex Entries | |||
193 | ,volume="5" | 214 | ,volume="5" |
194 | ,number="3" | 215 | ,number="3" |
195 | ,pages="354-382" | 216 | ,pages="354-382" |
196 | ,note="Available: | ||
197 | \url{http://portal.acm.org/citation.cfm?id=320619&dl=GUIDE,} | ||
198 | [Viewed December 3, 2007]" | ||
199 | ,annotation={ | 217 | ,annotation={ |
200 | Use garbage collector to clean up data after everyone is done with it. | 218 | Use garbage collector to clean up data after everyone is done with it. |
201 | . | 219 | . |
202 | Oldest use of something vaguely resembling RCU that I have found. | 220 | Oldest use of something vaguely resembling RCU that I have found. |
221 | http://portal.acm.org/citation.cfm?id=320619&dl=GUIDE, | ||
222 | [Viewed December 3, 2007] | ||
203 | } | 223 | } |
204 | } | 224 | } |
205 | 225 | ||
@@ -309,7 +329,7 @@ for Programming Languages and Operating Systems}" | |||
309 | ,doi = {http://doi.acm.org/10.1145/42392.42399} | 329 | ,doi = {http://doi.acm.org/10.1145/42392.42399} |
310 | ,publisher = {ACM} | 330 | ,publisher = {ACM} |
311 | ,address = {New York, NY, USA} | 331 | ,address = {New York, NY, USA} |
312 | ,annotation= { | 332 | ,annotation={ |
313 | At the top of page 307: "Conflicts with deposits and withdrawals | 333 | At the top of page 307: "Conflicts with deposits and withdrawals |
314 | are necessary if the reported total is to be up to date. They | 334 | are necessary if the reported total is to be up to date. They |
315 | could be avoided by having total return a sum that is slightly | 335 | could be avoided by having total return a sum that is slightly |
@@ -346,8 +366,9 @@ for Programming Languages and Operating Systems}" | |||
346 | } | 366 | } |
347 | } | 367 | } |
348 | 368 | ||
349 | @Book{Adams91 | 369 | # Was Adams91, see also syncrefs.bib. |
350 | ,Author="Gregory R. Adams" | 370 | @Book{Andrews91textbook |
371 | ,Author="Gregory R. Andrews" | ||
351 | ,title="Concurrent Programming, Principles, and Practices" | 372 | ,title="Concurrent Programming, Principles, and Practices" |
352 | ,Publisher="Benjamin Cummins" | 373 | ,Publisher="Benjamin Cummins" |
353 | ,Year="1991" | 374 | ,Year="1991" |
@@ -398,39 +419,39 @@ for Programming Languages and Operating Systems}" | |||
398 | } | 419 | } |
399 | } | 420 | } |
400 | 421 | ||
401 | @conference{Pu95a, | 422 | @conference{Pu95a |
402 | Author = "Calton Pu and Tito Autrey and Andrew Black and Charles Consel and | 423 | ,Author = "Calton Pu and Tito Autrey and Andrew Black and Charles Consel and |
403 | Crispin Cowan and Jon Inouye and Lakshmi Kethana and Jonathan Walpole and | 424 | Crispin Cowan and Jon Inouye and Lakshmi Kethana and Jonathan Walpole and |
404 | Ke Zhang", | 425 | Ke Zhang" |
405 | Title = "Optimistic Incremental Specialization: Streamlining a Commercial | 426 | ,Title = "Optimistic Incremental Specialization: Streamlining a Commercial |
406 | Operating System", | 427 | ,Operating System" |
407 | Booktitle = "15\textsuperscript{th} ACM Symposium on | 428 | ,Booktitle = "15\textsuperscript{th} ACM Symposium on |
408 | Operating Systems Principles (SOSP'95)", | 429 | ,Operating Systems Principles (SOSP'95)" |
409 | address = "Copper Mountain, CO", | 430 | ,address = "Copper Mountain, CO" |
410 | month="December", | 431 | ,month="December" |
411 | year="1995", | 432 | ,year="1995" |
412 | pages="314-321", | 433 | ,pages="314-321" |
413 | annotation=" | 434 | ,annotation={ |
414 | Uses a replugger, but with a flag to signal when people are | 435 | Uses a replugger, but with a flag to signal when people are |
415 | using the resource at hand. Only one reader at a time. | 436 | using the resource at hand. Only one reader at a time. |
416 | " | 437 | } |
417 | } | 438 | } |
418 | 439 | ||
419 | @conference{Cowan96a, | 440 | @conference{Cowan96a |
420 | Author = "Crispin Cowan and Tito Autrey and Charles Krasic and | 441 | ,Author = "Crispin Cowan and Tito Autrey and Charles Krasic and |
421 | Calton Pu and Jonathan Walpole", | 442 | ,Calton Pu and Jonathan Walpole" |
422 | Title = "Fast Concurrent Dynamic Linking for an Adaptive Operating System", | 443 | ,Title = "Fast Concurrent Dynamic Linking for an Adaptive Operating System" |
423 | Booktitle = "International Conference on Configurable Distributed Systems | 444 | ,Booktitle = "International Conference on Configurable Distributed Systems |
424 | (ICCDS'96)", | 445 | (ICCDS'96)" |
425 | address = "Annapolis, MD", | 446 | ,address = "Annapolis, MD" |
426 | month="May", | 447 | ,month="May" |
427 | year="1996", | 448 | ,year="1996" |
428 | pages="108", | 449 | ,pages="108" |
429 | isbn="0-8186-7395-8", | 450 | ,isbn="0-8186-7395-8" |
430 | annotation=" | 451 | ,annotation={ |
431 | Uses a replugger, but with a counter to signal when people are | 452 | Uses a replugger, but with a counter to signal when people are |
432 | using the resource at hand. Allows multiple readers. | 453 | using the resource at hand. Allows multiple readers. |
433 | " | 454 | } |
434 | } | 455 | } |
435 | 456 | ||
436 | @techreport{Slingwine95 | 457 | @techreport{Slingwine95 |
@@ -493,14 +514,13 @@ Problems" | |||
493 | ,Year="1998" | 514 | ,Year="1998" |
494 | ,pages="509-518" | 515 | ,pages="509-518" |
495 | ,Address="Las Vegas, NV" | 516 | ,Address="Las Vegas, NV" |
496 | ,note="Available: | ||
497 | \url{http://www.rdrop.com/users/paulmck/RCU/rclockpdcsproof.pdf} | ||
498 | [Viewed December 3, 2007]" | ||
499 | ,annotation={ | 517 | ,annotation={ |
500 | Describes and analyzes RCU mechanism in DYNIX/ptx. Describes | 518 | Describes and analyzes RCU mechanism in DYNIX/ptx. Describes |
501 | application to linked list update and log-buffer flushing. | 519 | application to linked list update and log-buffer flushing. |
502 | Defines 'quiescent state'. Includes both measured and analytic | 520 | Defines 'quiescent state'. Includes both measured and analytic |
503 | evaluation. | 521 | evaluation. |
522 | http://www.rdrop.com/users/paulmck/RCU/rclockpdcsproof.pdf | ||
523 | [Viewed December 3, 2007] | ||
504 | } | 524 | } |
505 | } | 525 | } |
506 | 526 | ||
@@ -514,13 +534,12 @@ Operating System Design and Implementation}" | |||
514 | ,Year="1999" | 534 | ,Year="1999" |
515 | ,pages="87-100" | 535 | ,pages="87-100" |
516 | ,Address="New Orleans, LA" | 536 | ,Address="New Orleans, LA" |
517 | ,note="Available: | ||
518 | \url{http://www.usenix.org/events/osdi99/full_papers/gamsa/gamsa.pdf} | ||
519 | [Viewed August 30, 2006]" | ||
520 | ,annotation={ | 537 | ,annotation={ |
521 | Use of RCU-like facility in K42/Tornado. Another independent | 538 | Use of RCU-like facility in K42/Tornado. Another independent |
522 | invention of RCU. | 539 | invention of RCU. |
523 | See especially pages 7-9 (Section 5). | 540 | See especially pages 7-9 (Section 5). |
541 | http://www.usenix.org/events/osdi99/full_papers/gamsa/gamsa.pdf | ||
542 | [Viewed August 30, 2006] | ||
524 | } | 543 | } |
525 | } | 544 | } |
526 | 545 | ||
@@ -611,9 +630,9 @@ Orran Krieger and Rusty Russell and Dipankar Sarma and Maneesh Soni" | |||
611 | ,note="Available: | 630 | ,note="Available: |
612 | \url{http://marc.theaimsgroup.com/?l=linux-kernel&m=100259266316456&w=2} | 631 | \url{http://marc.theaimsgroup.com/?l=linux-kernel&m=100259266316456&w=2} |
613 | [Viewed June 23, 2004]" | 632 | [Viewed June 23, 2004]" |
614 | ,annotation=" | 633 | ,annotation={ |
615 | Memory-barrier and Alpha thread. 100 messages, not too bad... | 634 | Memory-barrier and Alpha thread. 100 messages, not too bad... |
616 | " | 635 | } |
617 | } | 636 | } |
618 | 637 | ||
619 | @unpublished{Spraul01 | 638 | @unpublished{Spraul01 |
@@ -624,10 +643,10 @@ Orran Krieger and Rusty Russell and Dipankar Sarma and Maneesh Soni" | |||
624 | ,note="Available: | 643 | ,note="Available: |
625 | \url{http://marc.theaimsgroup.com/?l=linux-kernel&m=100264675012867&w=2} | 644 | \url{http://marc.theaimsgroup.com/?l=linux-kernel&m=100264675012867&w=2} |
626 | [Viewed June 23, 2004]" | 645 | [Viewed June 23, 2004]" |
627 | ,annotation=" | 646 | ,annotation={ |
628 | Suggested burying memory barriers in Linux's list-manipulation | 647 | Suggested burying memory barriers in Linux's list-manipulation |
629 | primitives. | 648 | primitives. |
630 | " | 649 | } |
631 | } | 650 | } |
632 | 651 | ||
633 | @unpublished{LinusTorvalds2001a | 652 | @unpublished{LinusTorvalds2001a |
@@ -638,6 +657,8 @@ Orran Krieger and Rusty Russell and Dipankar Sarma and Maneesh Soni" | |||
638 | ,note="Available: | 657 | ,note="Available: |
639 | \url{http://lkml.org/lkml/2001/10/13/105} | 658 | \url{http://lkml.org/lkml/2001/10/13/105} |
640 | [Viewed August 21, 2004]" | 659 | [Viewed August 21, 2004]" |
660 | ,annotation={ | ||
661 | } | ||
641 | } | 662 | } |
642 | 663 | ||
643 | @unpublished{Blanchard02a | 664 | @unpublished{Blanchard02a |
@@ -657,10 +678,10 @@ Orran Krieger and Rusty Russell and Dipankar Sarma and Maneesh Soni" | |||
657 | ,Month="June" | 678 | ,Month="June" |
658 | ,Year="2002" | 679 | ,Year="2002" |
659 | ,pages="289-300" | 680 | ,pages="289-300" |
660 | ,annotation=" | 681 | ,annotation={ |
661 | Measured scalability of Linux 2.4 kernel's directory-entry cache | 682 | Measured scalability of Linux 2.4 kernel's directory-entry cache |
662 | (dcache), and measured some scalability enhancements. | 683 | (dcache), and measured some scalability enhancements. |
663 | " | 684 | } |
664 | } | 685 | } |
665 | 686 | ||
666 | @Conference{McKenney02a | 687 | @Conference{McKenney02a |
@@ -674,10 +695,10 @@ Andrea Arcangeli and Andi Kleen and Orran Krieger and Rusty Russell" | |||
674 | ,note="Available: | 695 | ,note="Available: |
675 | \url{http://www.linux.org.uk/~ajh/ols2002_proceedings.pdf.gz} | 696 | \url{http://www.linux.org.uk/~ajh/ols2002_proceedings.pdf.gz} |
676 | [Viewed June 23, 2004]" | 697 | [Viewed June 23, 2004]" |
677 | ,annotation=" | 698 | ,annotation={ |
678 | Presented and compared a number of RCU implementations for the | 699 | Presented and compared a number of RCU implementations for the |
679 | Linux kernel. | 700 | Linux kernel. |
680 | " | 701 | } |
681 | } | 702 | } |
682 | 703 | ||
683 | @unpublished{Sarma02a | 704 | @unpublished{Sarma02a |
@@ -688,9 +709,9 @@ Andrea Arcangeli and Andi Kleen and Orran Krieger and Rusty Russell" | |||
688 | ,note="Available: | 709 | ,note="Available: |
689 | \url{http://marc.theaimsgroup.com/?l=linux-kernel&m=102645767914212&w=2} | 710 | \url{http://marc.theaimsgroup.com/?l=linux-kernel&m=102645767914212&w=2} |
690 | [Viewed June 23, 2004]" | 711 | [Viewed June 23, 2004]" |
691 | ,annotation=" | 712 | ,annotation={ |
692 | Compare fastwalk and RCU for dcache. RCU won. | 713 | Compare fastwalk and RCU for dcache. RCU won. |
693 | " | 714 | } |
694 | } | 715 | } |
695 | 716 | ||
696 | @unpublished{Barbieri02 | 717 | @unpublished{Barbieri02 |
@@ -701,9 +722,9 @@ Andrea Arcangeli and Andi Kleen and Orran Krieger and Rusty Russell" | |||
701 | ,note="Available: | 722 | ,note="Available: |
702 | \url{http://marc.theaimsgroup.com/?l=linux-kernel&m=103082050621241&w=2} | 723 | \url{http://marc.theaimsgroup.com/?l=linux-kernel&m=103082050621241&w=2} |
703 | [Viewed: June 23, 2004]" | 724 | [Viewed: June 23, 2004]" |
704 | ,annotation=" | 725 | ,annotation={ |
705 | Suggested RCU for vfs\_shared\_cred. | 726 | Suggested RCU for vfs\_shared\_cred. |
706 | " | 727 | } |
707 | } | 728 | } |
708 | 729 | ||
709 | @unpublished{Dickins02a | 730 | @unpublished{Dickins02a |
@@ -722,10 +743,10 @@ Andrea Arcangeli and Andi Kleen and Orran Krieger and Rusty Russell" | |||
722 | ,note="Available: | 743 | ,note="Available: |
723 | \url{http://marc.theaimsgroup.com/?l=linux-kernel&m=103462075416638&w=2} | 744 | \url{http://marc.theaimsgroup.com/?l=linux-kernel&m=103462075416638&w=2} |
724 | [Viewed June 23, 2004]" | 745 | [Viewed June 23, 2004]" |
725 | ,annotation=" | 746 | ,annotation={ |
726 | Performance of dcache RCU on kernbench for 16x NUMA-Q and 1x, | 747 | Performance of dcache RCU on kernbench for 16x NUMA-Q and 1x, |
727 | 2x, and 4x systems. RCU does no harm, and helps on 16x. | 748 | 2x, and 4x systems. RCU does no harm, and helps on 16x. |
728 | " | 749 | } |
729 | } | 750 | } |
730 | 751 | ||
731 | @unpublished{LinusTorvalds2003a | 752 | @unpublished{LinusTorvalds2003a |
@@ -736,14 +757,14 @@ Andrea Arcangeli and Andi Kleen and Orran Krieger and Rusty Russell" | |||
736 | ,note="Available: | 757 | ,note="Available: |
737 | \url{http://lkml.org/lkml/2003/3/9/205} | 758 | \url{http://lkml.org/lkml/2003/3/9/205} |
738 | [Viewed March 13, 2006]" | 759 | [Viewed March 13, 2006]" |
739 | ,annotation=" | 760 | ,annotation={ |
740 | Linus suggests replacing brlock with RCU and/or seqlocks: | 761 | Linus suggests replacing brlock with RCU and/or seqlocks: |
741 | . | 762 | . |
742 | 'It's entirely possible that the current user could be replaced | 763 | 'It's entirely possible that the current user could be replaced |
743 | by RCU and/or seqlocks, and we could get rid of brlocks entirely.' | 764 | by RCU and/or seqlocks, and we could get rid of brlocks entirely.' |
744 | . | 765 | . |
745 | Steve Hemminger responds by replacing them with RCU. | 766 | Steve Hemminger responds by replacing them with RCU. |
746 | " | 767 | } |
747 | } | 768 | } |
748 | 769 | ||
749 | @article{Appavoo03a | 770 | @article{Appavoo03a |
@@ -758,9 +779,9 @@ B. Rosenburg and M. Stumm and J. Xenidis" | |||
758 | ,volume="42" | 779 | ,volume="42" |
759 | ,number="1" | 780 | ,number="1" |
760 | ,pages="60-76" | 781 | ,pages="60-76" |
761 | ,annotation=" | 782 | ,annotation={ |
762 | Use of RCU to enable hot-swapping for autonomic behavior in K42. | 783 | Use of RCU to enable hot-swapping for autonomic behavior in K42. |
763 | " | 784 | } |
764 | } | 785 | } |
765 | 786 | ||
766 | @unpublished{Seigh03 | 787 | @unpublished{Seigh03 |
@@ -769,9 +790,9 @@ B. Rosenburg and M. Stumm and J. Xenidis" | |||
769 | ,Year="2003" | 790 | ,Year="2003" |
770 | ,Month="March" | 791 | ,Month="March" |
771 | ,note="email correspondence" | 792 | ,note="email correspondence" |
772 | ,annotation=" | 793 | ,annotation={ |
773 | Described the relationship of the VM/XA passive serialization to RCU. | 794 | Described the relationship of the VM/XA passive serialization to RCU. |
774 | " | 795 | } |
775 | } | 796 | } |
776 | 797 | ||
777 | @Conference{Arcangeli03 | 798 | @Conference{Arcangeli03 |
@@ -785,14 +806,12 @@ Dipankar Sarma" | |||
785 | ,year="2003" | 806 | ,year="2003" |
786 | ,month="June" | 807 | ,month="June" |
787 | ,pages="297-310" | 808 | ,pages="297-310" |
788 | ,note="Available: | 809 | ,annotation={ |
789 | \url{http://www.rdrop.com/users/paulmck/RCU/rcu.FREENIX.2003.06.14.pdf} | ||
790 | [Viewed November 21, 2007]" | ||
791 | ,annotation=" | ||
792 | Compared updated RCU implementations for the Linux kernel, and | 810 | Compared updated RCU implementations for the Linux kernel, and |
793 | described System V IPC use of RCU, including order-of-magnitude | 811 | described System V IPC use of RCU, including order-of-magnitude |
794 | performance improvements. | 812 | performance improvements. |
795 | " | 813 | http://www.rdrop.com/users/paulmck/RCU/rcu.FREENIX.2003.06.14.pdf |
814 | } | ||
796 | } | 815 | } |
797 | 816 | ||
798 | @Conference{Soules03a | 817 | @Conference{Soules03a |
@@ -820,10 +839,10 @@ Michal Ostrowski and Bryan Rosenburg and Jimi Xenidis" | |||
820 | ,note="Available: | 839 | ,note="Available: |
821 | \url{http://www.linuxjournal.com/article/6993} | 840 | \url{http://www.linuxjournal.com/article/6993} |
822 | [Viewed November 14, 2007]" | 841 | [Viewed November 14, 2007]" |
823 | ,annotation=" | 842 | ,annotation={ |
824 | Reader-friendly intro to RCU, with the infamous old-man-and-brat | 843 | Reader-friendly intro to RCU, with the infamous old-man-and-brat |
825 | cartoon. | 844 | cartoon. |
826 | " | 845 | } |
827 | } | 846 | } |
828 | 847 | ||
829 | @unpublished{Sarma03a | 848 | @unpublished{Sarma03a |
@@ -832,7 +851,9 @@ Michal Ostrowski and Bryan Rosenburg and Jimi Xenidis" | |||
832 | ,month="December" | 851 | ,month="December" |
833 | ,year="2003" | 852 | ,year="2003" |
834 | ,note="Message ID: 20031222180114.GA2248@in.ibm.com" | 853 | ,note="Message ID: 20031222180114.GA2248@in.ibm.com" |
835 | ,annotation="dipankar/ct.2004.03.27/RCUll.2003.12.22.patch" | 854 | ,annotation={ |
855 | dipankar/ct.2004.03.27/RCUll.2003.12.22.patch | ||
856 | } | ||
836 | } | 857 | } |
837 | 858 | ||
838 | @techreport{Friedberg03a | 859 | @techreport{Friedberg03a |
@@ -844,11 +865,11 @@ Michal Ostrowski and Bryan Rosenburg and Jimi Xenidis" | |||
844 | ,number="US Patent 6,662,184" | 865 | ,number="US Patent 6,662,184" |
845 | ,month="December" | 866 | ,month="December" |
846 | ,pages="112" | 867 | ,pages="112" |
847 | ,annotation=" | 868 | ,annotation={ |
848 | Applies RCU to a wildcard-search Patricia tree in order to permit | 869 | Applies RCU to a wildcard-search Patricia tree in order to permit |
849 | synchronization-free lookup. RCU is used to retain removed nodes | 870 | synchronization-free lookup. RCU is used to retain removed nodes |
850 | for a grace period before freeing them. | 871 | for a grace period before freeing them. |
851 | " | 872 | } |
852 | } | 873 | } |
853 | 874 | ||
854 | @article{McKenney04a | 875 | @article{McKenney04a |
@@ -860,12 +881,11 @@ Michal Ostrowski and Bryan Rosenburg and Jimi Xenidis" | |||
860 | ,volume="1" | 881 | ,volume="1" |
861 | ,number="118" | 882 | ,number="118" |
862 | ,pages="38-46" | 883 | ,pages="38-46" |
863 | ,note="Available: | 884 | ,annotation={ |
864 | \url{http://www.linuxjournal.com/node/7124} | ||
865 | [Viewed December 26, 2010]" | ||
866 | ,annotation=" | ||
867 | Reader friendly intro to dcache and RCU. | 885 | Reader friendly intro to dcache and RCU. |
868 | " | 886 | http://www.linuxjournal.com/node/7124 |
887 | [Viewed December 26, 2010] | ||
888 | } | ||
869 | } | 889 | } |
870 | 890 | ||
871 | @Conference{McKenney04b | 891 | @Conference{McKenney04b |
@@ -879,10 +899,10 @@ Michal Ostrowski and Bryan Rosenburg and Jimi Xenidis" | |||
879 | \url{http://www.linux.org.au/conf/2004/abstracts.html#90} | 899 | \url{http://www.linux.org.au/conf/2004/abstracts.html#90} |
880 | \url{http://www.rdrop.com/users/paulmck/RCU/lockperf.2004.01.17a.pdf} | 900 | \url{http://www.rdrop.com/users/paulmck/RCU/lockperf.2004.01.17a.pdf} |
881 | [Viewed June 23, 2004]" | 901 | [Viewed June 23, 2004]" |
882 | ,annotation=" | 902 | ,annotation={ |
883 | Compares performance of RCU to that of other locking primitives | 903 | Compares performance of RCU to that of other locking primitives |
884 | over a number of CPUs (x86, Opteron, Itanium, and PPC). | 904 | over a number of CPUs (x86, Opteron, Itanium, and PPC). |
885 | " | 905 | } |
886 | } | 906 | } |
887 | 907 | ||
888 | @unpublished{Sarma04a | 908 | @unpublished{Sarma04a |
@@ -891,7 +911,9 @@ Michal Ostrowski and Bryan Rosenburg and Jimi Xenidis" | |||
891 | ,month="March" | 911 | ,month="March" |
892 | ,year="2004" | 912 | ,year="2004" |
893 | ,note="\url{http://marc.theaimsgroup.com/?l=linux-kernel&m=108003746402892&w=2}" | 913 | ,note="\url{http://marc.theaimsgroup.com/?l=linux-kernel&m=108003746402892&w=2}" |
894 | ,annotation="Head of thread: dipankar/2004.03.23/rcu-low-lat.1.patch" | 914 | ,annotation={ |
915 | Head of thread: dipankar/2004.03.23/rcu-low-lat.1.patch | ||
916 | } | ||
895 | } | 917 | } |
896 | 918 | ||
897 | @unpublished{Sarma04b | 919 | @unpublished{Sarma04b |
@@ -900,7 +922,9 @@ Michal Ostrowski and Bryan Rosenburg and Jimi Xenidis" | |||
900 | ,month="March" | 922 | ,month="March" |
901 | ,year="2004" | 923 | ,year="2004" |
902 | ,note="\url{http://marc.theaimsgroup.com/?l=linux-kernel&m=108016474829546&w=2}" | 924 | ,note="\url{http://marc.theaimsgroup.com/?l=linux-kernel&m=108016474829546&w=2}" |
903 | ,annotation="dipankar/rcuth.2004.03.24/rcu-throttle.patch" | 925 | ,annotation={ |
926 | dipankar/rcuth.2004.03.24/rcu-throttle.patch | ||
927 | } | ||
904 | } | 928 | } |
905 | 929 | ||
906 | @unpublished{Spraul04a | 930 | @unpublished{Spraul04a |
@@ -911,9 +935,9 @@ Michal Ostrowski and Bryan Rosenburg and Jimi Xenidis" | |||
911 | ,note="Available: | 935 | ,note="Available: |
912 | \url{http://marc.theaimsgroup.com/?l=linux-kernel&m=108546407726602&w=2} | 936 | \url{http://marc.theaimsgroup.com/?l=linux-kernel&m=108546407726602&w=2} |
913 | [Viewed June 23, 2004]" | 937 | [Viewed June 23, 2004]" |
914 | ,annotation=" | 938 | ,annotation={ |
915 | Hierarchical-bitmap patch for RCU infrastructure. | 939 | Hierarchical-bitmap patch for RCU infrastructure. |
916 | " | 940 | } |
917 | } | 941 | } |
918 | 942 | ||
919 | @unpublished{Steiner04a | 943 | @unpublished{Steiner04a |
@@ -950,10 +974,12 @@ Realtime Applications" | |||
950 | ,year="2004" | 974 | ,year="2004" |
951 | ,month="June" | 975 | ,month="June" |
952 | ,pages="182-191" | 976 | ,pages="182-191" |
953 | ,annotation=" | 977 | ,annotation={ |
954 | Describes and compares a number of modifications to the Linux RCU | 978 | Describes and compares a number of modifications to the Linux RCU |
955 | implementation that make it friendly to realtime applications. | 979 | implementation that make it friendly to realtime applications. |
956 | " | 980 | https://www.usenix.org/conference/2004-usenix-annual-technical-conference/making-rcu-safe-deep-sub-millisecond-response |
981 | [Viewed July 26, 2012] | ||
982 | } | ||
957 | } | 983 | } |
958 | 984 | ||
959 | @phdthesis{PaulEdwardMcKenneyPhD | 985 | @phdthesis{PaulEdwardMcKenneyPhD |
@@ -964,14 +990,13 @@ in Operating System Kernels" | |||
964 | ,school="OGI School of Science and Engineering at | 990 | ,school="OGI School of Science and Engineering at |
965 | Oregon Health and Sciences University" | 991 | Oregon Health and Sciences University" |
966 | ,year="2004" | 992 | ,year="2004" |
967 | ,note="Available: | 993 | ,annotation={ |
968 | \url{http://www.rdrop.com/users/paulmck/RCU/RCUdissertation.2004.07.14e1.pdf} | ||
969 | [Viewed October 15, 2004]" | ||
970 | ,annotation=" | ||
971 | Describes RCU implementations and presents design patterns | 994 | Describes RCU implementations and presents design patterns |
972 | corresponding to common uses of RCU in several operating-system | 995 | corresponding to common uses of RCU in several operating-system |
973 | kernels. | 996 | kernels. |
974 | " | 997 | http://www.rdrop.com/users/paulmck/RCU/RCUdissertation.2004.07.14e1.pdf |
998 | [Viewed October 15, 2004] | ||
999 | } | ||
975 | } | 1000 | } |
976 | 1001 | ||
977 | @unpublished{PaulEMcKenney2004rcu:dereference | 1002 | @unpublished{PaulEMcKenney2004rcu:dereference |
@@ -982,9 +1007,9 @@ Oregon Health and Sciences University" | |||
982 | ,note="Available: | 1007 | ,note="Available: |
983 | \url{http://lkml.org/lkml/2004/8/6/237} | 1008 | \url{http://lkml.org/lkml/2004/8/6/237} |
984 | [Viewed June 8, 2010]" | 1009 | [Viewed June 8, 2010]" |
985 | ,annotation=" | 1010 | ,annotation={ |
986 | Introduce rcu_dereference(). | 1011 | Introduce rcu_dereference(). |
987 | " | 1012 | } |
988 | } | 1013 | } |
989 | 1014 | ||
990 | @unpublished{JimHouston04a | 1015 | @unpublished{JimHouston04a |
@@ -995,11 +1020,11 @@ Oregon Health and Sciences University" | |||
995 | ,note="Available: | 1020 | ,note="Available: |
996 | \url{http://lkml.org/lkml/2004/8/30/87} | 1021 | \url{http://lkml.org/lkml/2004/8/30/87} |
997 | [Viewed February 17, 2005]" | 1022 | [Viewed February 17, 2005]" |
998 | ,annotation=" | 1023 | ,annotation={ |
999 | Uses active code in rcu_read_lock() and rcu_read_unlock() to | 1024 | Uses active code in rcu_read_lock() and rcu_read_unlock() to |
1000 | make RCU happen, allowing RCU to function on CPUs that do not | 1025 | make RCU happen, allowing RCU to function on CPUs that do not |
1001 | receive a scheduling-clock interrupt. | 1026 | receive a scheduling-clock interrupt. |
1002 | " | 1027 | } |
1003 | } | 1028 | } |
1004 | 1029 | ||
1005 | @unpublished{TomHart04a | 1030 | @unpublished{TomHart04a |
@@ -1010,9 +1035,9 @@ Oregon Health and Sciences University" | |||
1010 | ,note="Available: | 1035 | ,note="Available: |
1011 | \url{http://www.cs.toronto.edu/~tomhart/masters_thesis.html} | 1036 | \url{http://www.cs.toronto.edu/~tomhart/masters_thesis.html} |
1012 | [Viewed October 15, 2004]" | 1037 | [Viewed October 15, 2004]" |
1013 | ,annotation=" | 1038 | ,annotation={ |
1014 | Proposes comparing RCU to lock-free methods for the Linux kernel. | 1039 | Proposes comparing RCU to lock-free methods for the Linux kernel. |
1015 | " | 1040 | } |
1016 | } | 1041 | } |
1017 | 1042 | ||
1018 | @unpublished{Vaddagiri04a | 1043 | @unpublished{Vaddagiri04a |
@@ -1023,9 +1048,9 @@ Oregon Health and Sciences University" | |||
1023 | ,note="Available: | 1048 | ,note="Available: |
1024 | \url{http://marc.theaimsgroup.com/?t=109395731700004&r=1&w=2} | 1049 | \url{http://marc.theaimsgroup.com/?t=109395731700004&r=1&w=2} |
1025 | [Viewed October 18, 2004]" | 1050 | [Viewed October 18, 2004]" |
1026 | ,annotation=" | 1051 | ,annotation={ |
1027 | Srivatsa's RCU patch for tcp_ehash lookup. | 1052 | Srivatsa's RCU patch for tcp_ehash lookup. |
1028 | " | 1053 | } |
1029 | } | 1054 | } |
1030 | 1055 | ||
1031 | @unpublished{Thirumalai04a | 1056 | @unpublished{Thirumalai04a |
@@ -1036,9 +1061,9 @@ Oregon Health and Sciences University" | |||
1036 | ,note="Available: | 1061 | ,note="Available: |
1037 | \url{http://marc.theaimsgroup.com/?t=109144217400003&r=1&w=2} | 1062 | \url{http://marc.theaimsgroup.com/?t=109144217400003&r=1&w=2} |
1038 | [Viewed October 18, 2004]" | 1063 | [Viewed October 18, 2004]" |
1039 | ,annotation=" | 1064 | ,annotation={ |
1040 | Ravikiran's lockfree FD patch. | 1065 | Ravikiran's lockfree FD patch. |
1041 | " | 1066 | } |
1042 | } | 1067 | } |
1043 | 1068 | ||
1044 | @unpublished{Thirumalai04b | 1069 | @unpublished{Thirumalai04b |
@@ -1049,9 +1074,9 @@ Oregon Health and Sciences University" | |||
1049 | ,note="Available: | 1074 | ,note="Available: |
1050 | \url{http://marc.theaimsgroup.com/?l=linux-kernel&m=109152521410459&w=2} | 1075 | \url{http://marc.theaimsgroup.com/?l=linux-kernel&m=109152521410459&w=2} |
1051 | [Viewed October 18, 2004]" | 1076 | [Viewed October 18, 2004]" |
1052 | ,annotation=" | 1077 | ,annotation={ |
1053 | Ravikiran's lockfree FD patch. | 1078 | Ravikiran's lockfree FD patch. |
1054 | " | 1079 | } |
1055 | } | 1080 | } |
1056 | 1081 | ||
1057 | @unpublished{PaulEMcKenney2004rcu:assign:pointer | 1082 | @unpublished{PaulEMcKenney2004rcu:assign:pointer |
@@ -1062,9 +1087,9 @@ Oregon Health and Sciences University" | |||
1062 | ,note="Available: | 1087 | ,note="Available: |
1063 | \url{http://lkml.org/lkml/2004/10/23/241} | 1088 | \url{http://lkml.org/lkml/2004/10/23/241} |
1064 | [Viewed June 8, 2010]" | 1089 | [Viewed June 8, 2010]" |
1065 | ,annotation=" | 1090 | ,annotation={ |
1066 | Introduce rcu_assign_pointer(). | 1091 | Introduce rcu_assign_pointer(). |
1067 | " | 1092 | } |
1068 | } | 1093 | } |
1069 | 1094 | ||
1070 | @unpublished{JamesMorris04a | 1095 | @unpublished{JamesMorris04a |
@@ -1073,12 +1098,12 @@ Oregon Health and Sciences University" | |||
1073 | ,day="15" | 1098 | ,day="15" |
1074 | ,month="November" | 1099 | ,month="November" |
1075 | ,year="2004" | 1100 | ,year="2004" |
1076 | ,note="Available: | 1101 | ,note="\url{http://marc.theaimsgroup.com/?l=linux-kernel&m=110054979416004&w=2}" |
1077 | \url{http://marc.theaimsgroup.com/?l=linux-kernel&m=110054979416004&w=2} | 1102 | ,annotation={ |
1078 | [Viewed December 10, 2004]" | ||
1079 | ,annotation=" | ||
1080 | James Morris posts Kaigai Kohei's patch to LKML. | 1103 | James Morris posts Kaigai Kohei's patch to LKML. |
1081 | " | 1104 | [Viewed December 10, 2004] |
1105 | Kaigai's patch is at https://lkml.org/lkml/2004/9/27/52 | ||
1106 | } | ||
1082 | } | 1107 | } |
1083 | 1108 | ||
1084 | @unpublished{JamesMorris04b | 1109 | @unpublished{JamesMorris04b |
@@ -1089,9 +1114,9 @@ Oregon Health and Sciences University" | |||
1089 | ,note="Available: | 1114 | ,note="Available: |
1090 | \url{http://www.livejournal.com/users/james_morris/2153.html} | 1115 | \url{http://www.livejournal.com/users/james_morris/2153.html} |
1091 | [Viewed December 10, 2004]" | 1116 | [Viewed December 10, 2004]" |
1092 | ,annotation=" | 1117 | ,annotation={ |
1093 | RCU helps SELinux performance. ;-) Made LWN. | 1118 | RCU helps SELinux performance. ;-) Made LWN. |
1094 | " | 1119 | } |
1095 | } | 1120 | } |
1096 | 1121 | ||
1097 | @unpublished{PaulMcKenney2005RCUSemantics | 1122 | @unpublished{PaulMcKenney2005RCUSemantics |
@@ -1103,9 +1128,9 @@ Oregon Health and Sciences University" | |||
1103 | ,note="Available: | 1128 | ,note="Available: |
1104 | \url{http://www.rdrop.com/users/paulmck/RCU/rcu-semantics.2005.01.30a.pdf} | 1129 | \url{http://www.rdrop.com/users/paulmck/RCU/rcu-semantics.2005.01.30a.pdf} |
1105 | [Viewed December 6, 2009]" | 1130 | [Viewed December 6, 2009]" |
1106 | ,annotation=" | 1131 | ,annotation={ |
1107 | Early derivation of RCU semantics. | 1132 | Early derivation of RCU semantics. |
1108 | " | 1133 | } |
1109 | } | 1134 | } |
1110 | 1135 | ||
1111 | @unpublished{PaulMcKenney2005e | 1136 | @unpublished{PaulMcKenney2005e |
@@ -1117,10 +1142,10 @@ Oregon Health and Sciences University" | |||
1117 | ,note="Available: | 1142 | ,note="Available: |
1118 | \url{http://lkml.org/lkml/2005/3/17/199} | 1143 | \url{http://lkml.org/lkml/2005/3/17/199} |
1119 | [Viewed September 5, 2005]" | 1144 | [Viewed September 5, 2005]" |
1120 | ,annotation=" | 1145 | ,annotation={ |
1121 | First posting showing how RCU can be safely adapted for | 1146 | First posting showing how RCU can be safely adapted for |
1122 | preemptable RCU read side critical sections. | 1147 | preemptable RCU read side critical sections. |
1123 | " | 1148 | } |
1124 | } | 1149 | } |
1125 | 1150 | ||
1126 | @unpublished{EsbenNeilsen2005a | 1151 | @unpublished{EsbenNeilsen2005a |
@@ -1132,12 +1157,12 @@ Oregon Health and Sciences University" | |||
1132 | ,note="Available: | 1157 | ,note="Available: |
1133 | \url{http://lkml.org/lkml/2005/3/18/122} | 1158 | \url{http://lkml.org/lkml/2005/3/18/122} |
1134 | [Viewed March 30, 2006]" | 1159 | [Viewed March 30, 2006]" |
1135 | ,annotation=" | 1160 | ,annotation={ |
1136 | Esben Neilsen suggests read-side suppression of grace-period | 1161 | Esben Neilsen suggests read-side suppression of grace-period |
1137 | processing for crude-but-workable realtime RCU. The downside | 1162 | processing for crude-but-workable realtime RCU. The downside |
1138 | is indefinite grace periods...But this is OK for experimentation | 1163 | is indefinite grace periods... But this is OK for experimentation |
1139 | and testing. | 1164 | and testing. |
1140 | " | 1165 | } |
1141 | } | 1166 | } |
1142 | 1167 | ||
1143 | @unpublished{TomHart05a | 1168 | @unpublished{TomHart05a |
@@ -1149,10 +1174,10 @@ Data Structures" | |||
1149 | ,note="Available: | 1174 | ,note="Available: |
1150 | \url{ftp://ftp.cs.toronto.edu/csrg-technical-reports/515/} | 1175 | \url{ftp://ftp.cs.toronto.edu/csrg-technical-reports/515/} |
1151 | [Viewed March 4, 2005]" | 1176 | [Viewed March 4, 2005]" |
1152 | ,annotation=" | 1177 | ,annotation={ |
1153 | Comparison of RCU, QBSR, and EBSR. RCU wins for read-mostly | 1178 | Comparison of RCU, QBSR, and EBSR. RCU wins for read-mostly |
1154 | workloads. ;-) | 1179 | workloads. ;-) |
1155 | " | 1180 | } |
1156 | } | 1181 | } |
1157 | 1182 | ||
1158 | @unpublished{JonCorbet2005DeprecateSyncKernel | 1183 | @unpublished{JonCorbet2005DeprecateSyncKernel |
@@ -1164,10 +1189,10 @@ Data Structures" | |||
1164 | ,note="Available: | 1189 | ,note="Available: |
1165 | \url{http://lwn.net/Articles/134484/} | 1190 | \url{http://lwn.net/Articles/134484/} |
1166 | [Viewed May 3, 2005]" | 1191 | [Viewed May 3, 2005]" |
1167 | ,annotation=" | 1192 | ,annotation={ |
1168 | Jon Corbet describes deprecation of synchronize_kernel() | 1193 | Jon Corbet describes deprecation of synchronize_kernel() |
1169 | in favor of synchronize_rcu() and synchronize_sched(). | 1194 | in favor of synchronize_rcu() and synchronize_sched(). |
1170 | " | 1195 | } |
1171 | } | 1196 | } |
1172 | 1197 | ||
1173 | @unpublished{PaulMcKenney05a | 1198 | @unpublished{PaulMcKenney05a |
@@ -1178,10 +1203,10 @@ Data Structures" | |||
1178 | ,note="Available: | 1203 | ,note="Available: |
1179 | \url{http://lkml.org/lkml/2005/5/9/185} | 1204 | \url{http://lkml.org/lkml/2005/5/9/185} |
1180 | [Viewed May 13, 2005]" | 1205 | [Viewed May 13, 2005]" |
1181 | ,annotation=" | 1206 | ,annotation={ |
1182 | First publication of working lock-based deferred free patches | 1207 | First publication of working lock-based deferred free patches |
1183 | for the CONFIG_PREEMPT_RT environment. | 1208 | for the CONFIG_PREEMPT_RT environment. |
1184 | " | 1209 | } |
1185 | } | 1210 | } |
1186 | 1211 | ||
1187 | @conference{PaulMcKenney05b | 1212 | @conference{PaulMcKenney05b |
@@ -1194,10 +1219,10 @@ Data Structures" | |||
1194 | ,note="Available: | 1219 | ,note="Available: |
1195 | \url{http://www.rdrop.com/users/paulmck/RCU/realtimeRCU.2005.04.23a.pdf} | 1220 | \url{http://www.rdrop.com/users/paulmck/RCU/realtimeRCU.2005.04.23a.pdf} |
1196 | [Viewed May 13, 2005]" | 1221 | [Viewed May 13, 2005]" |
1197 | ,annotation=" | 1222 | ,annotation={ |
1198 | Realtime turns into making RCU yet more realtime friendly. | 1223 | Realtime turns into making RCU yet more realtime friendly. |
1199 | http://lca2005.linux.org.au/Papers/Paul%20McKenney/Towards%20Hard%20Realtime%20Response%20from%20the%20Linux%20Kernel/LKS.2005.04.22a.pdf | 1224 | http://lca2005.linux.org.au/Papers/Paul%20McKenney/Towards%20Hard%20Realtime%20Response%20from%20the%20Linux%20Kernel/LKS.2005.04.22a.pdf |
1200 | " | 1225 | } |
1201 | } | 1226 | } |
1202 | 1227 | ||
1203 | @unpublished{PaulEMcKenneyHomePage | 1228 | @unpublished{PaulEMcKenneyHomePage |
@@ -1208,9 +1233,9 @@ Data Structures" | |||
1208 | ,note="Available: | 1233 | ,note="Available: |
1209 | \url{http://www.rdrop.com/users/paulmck/} | 1234 | \url{http://www.rdrop.com/users/paulmck/} |
1210 | [Viewed May 25, 2005]" | 1235 | [Viewed May 25, 2005]" |
1211 | ,annotation=" | 1236 | ,annotation={ |
1212 | Paul McKenney's home page. | 1237 | Paul McKenney's home page. |
1213 | " | 1238 | } |
1214 | } | 1239 | } |
1215 | 1240 | ||
1216 | @unpublished{PaulEMcKenneyRCUPage | 1241 | @unpublished{PaulEMcKenneyRCUPage |
@@ -1221,9 +1246,9 @@ Data Structures" | |||
1221 | ,note="Available: | 1246 | ,note="Available: |
1222 | \url{http://www.rdrop.com/users/paulmck/RCU} | 1247 | \url{http://www.rdrop.com/users/paulmck/RCU} |
1223 | [Viewed May 25, 2005]" | 1248 | [Viewed May 25, 2005]" |
1224 | ,annotation=" | 1249 | ,annotation={ |
1225 | Paul McKenney's RCU page. | 1250 | Paul McKenney's RCU page. |
1226 | " | 1251 | } |
1227 | } | 1252 | } |
1228 | 1253 | ||
1229 | @unpublished{JosephSeigh2005a | 1254 | @unpublished{JosephSeigh2005a |
@@ -1232,10 +1257,10 @@ Data Structures" | |||
1232 | ,month="July" | 1257 | ,month="July" |
1233 | ,year="2005" | 1258 | ,year="2005" |
1234 | ,note="Personal communication" | 1259 | ,note="Personal communication" |
1235 | ,annotation=" | 1260 | ,annotation={ |
1236 | Joe Seigh announcing his atomic-ptr-plus project. | 1261 | Joe Seigh announcing his atomic-ptr-plus project. |
1237 | http://sourceforge.net/projects/atomic-ptr-plus/ | 1262 | http://sourceforge.net/projects/atomic-ptr-plus/ |
1238 | " | 1263 | } |
1239 | } | 1264 | } |
1240 | 1265 | ||
1241 | @unpublished{JosephSeigh2005b | 1266 | @unpublished{JosephSeigh2005b |
@@ -1247,9 +1272,9 @@ Data Structures" | |||
1247 | ,note="Available: | 1272 | ,note="Available: |
1248 | \url{http://sourceforge.net/projects/atomic-ptr-plus/} | 1273 | \url{http://sourceforge.net/projects/atomic-ptr-plus/} |
1249 | [Viewed August 8, 2005]" | 1274 | [Viewed August 8, 2005]" |
1250 | ,annotation=" | 1275 | ,annotation={ |
1251 | Joe Seigh's atomic-ptr-plus project. | 1276 | Joe Seigh's atomic-ptr-plus project. |
1252 | " | 1277 | } |
1253 | } | 1278 | } |
1254 | 1279 | ||
1255 | @unpublished{PaulMcKenney2005c | 1280 | @unpublished{PaulMcKenney2005c |
@@ -1261,9 +1286,9 @@ Data Structures" | |||
1261 | ,note="Available: | 1286 | ,note="Available: |
1262 | \url{http://lkml.org/lkml/2005/8/1/155} | 1287 | \url{http://lkml.org/lkml/2005/8/1/155} |
1263 | [Viewed March 14, 2006]" | 1288 | [Viewed March 14, 2006]" |
1264 | ,annotation=" | 1289 | ,annotation={ |
1265 | First operating counter-based realtime RCU patch posted to LKML. | 1290 | First operating counter-based realtime RCU patch posted to LKML. |
1266 | " | 1291 | } |
1267 | } | 1292 | } |
1268 | 1293 | ||
1269 | @unpublished{PaulMcKenney2005d | 1294 | @unpublished{PaulMcKenney2005d |
@@ -1275,11 +1300,11 @@ Data Structures" | |||
1275 | ,note="Available: | 1300 | ,note="Available: |
1276 | \url{http://lkml.org/lkml/2005/8/8/108} | 1301 | \url{http://lkml.org/lkml/2005/8/8/108} |
1277 | [Viewed March 14, 2006]" | 1302 | [Viewed March 14, 2006]" |
1278 | ,annotation=" | 1303 | ,annotation={ |
1279 | First operating counter-based realtime RCU patch posted to LKML, | 1304 | First operating counter-based realtime RCU patch posted to LKML, |
1280 | but fixed so that various unusual combinations of configuration | 1305 | but fixed so that various unusual combinations of configuration |
1281 | parameters all function properly. | 1306 | parameters all function properly. |
1282 | " | 1307 | } |
1283 | } | 1308 | } |
1284 | 1309 | ||
1285 | @unpublished{PaulMcKenney2005rcutorture | 1310 | @unpublished{PaulMcKenney2005rcutorture |
@@ -1291,9 +1316,25 @@ Data Structures" | |||
1291 | ,note="Available: | 1316 | ,note="Available: |
1292 | \url{http://lkml.org/lkml/2005/10/1/70} | 1317 | \url{http://lkml.org/lkml/2005/10/1/70} |
1293 | [Viewed March 14, 2006]" | 1318 | [Viewed March 14, 2006]" |
1294 | ,annotation=" | 1319 | ,annotation={ |
1295 | First rcutorture patch. | 1320 | First rcutorture patch. |
1296 | " | 1321 | } |
1322 | } | ||
1323 | |||
1324 | @unpublished{DavidSMiller2006HashedLocking | ||
1325 | ,Author="David S. Miller" | ||
1326 | ,Title="Re: [{PATCH}, {RFC}] {RCU} : {OOM} avoidance and lower latency" | ||
1327 | ,month="January" | ||
1328 | ,day="6" | ||
1329 | ,year="2006" | ||
1330 | ,note="Available: | ||
1331 | \url{https://lkml.org/lkml/2006/1/7/22} | ||
1332 | [Viewed February 29, 2012]" | ||
1333 | ,annotation={ | ||
1334 | David Miller's view on hashed arrays of locks: used to really | ||
1335 | like it, but time he saw an opportunity for this technique, | ||
1336 | something else always proved superior. Partitioning or RCU. ;-) | ||
1337 | } | ||
1297 | } | 1338 | } |
1298 | 1339 | ||
1299 | @conference{ThomasEHart2006a | 1340 | @conference{ThomasEHart2006a |
@@ -1309,10 +1350,10 @@ Distributed Processing Symposium" | |||
1309 | ,note="Available: | 1350 | ,note="Available: |
1310 | \url{http://www.rdrop.com/users/paulmck/RCU/hart_ipdps06.pdf} | 1351 | \url{http://www.rdrop.com/users/paulmck/RCU/hart_ipdps06.pdf} |
1311 | [Viewed April 28, 2008]" | 1352 | [Viewed April 28, 2008]" |
1312 | ,annotation=" | 1353 | ,annotation={ |
1313 | Compares QSBR, HPBR, EBR, and lock-free reference counting. | 1354 | Compares QSBR, HPBR, EBR, and lock-free reference counting. |
1314 | http://www.cs.toronto.edu/~tomhart/perflab/ipdps06.tgz | 1355 | http://www.cs.toronto.edu/~tomhart/perflab/ipdps06.tgz |
1315 | " | 1356 | } |
1316 | } | 1357 | } |
1317 | 1358 | ||
1318 | @unpublished{NickPiggin2006radixtree | 1359 | @unpublished{NickPiggin2006radixtree |
@@ -1324,9 +1365,9 @@ Distributed Processing Symposium" | |||
1324 | ,note="Available: | 1365 | ,note="Available: |
1325 | \url{http://lkml.org/lkml/2006/6/20/238} | 1366 | \url{http://lkml.org/lkml/2006/6/20/238} |
1326 | [Viewed March 25, 2008]" | 1367 | [Viewed March 25, 2008]" |
1327 | ,annotation=" | 1368 | ,annotation={ |
1328 | RCU-protected radix tree. | 1369 | RCU-protected radix tree. |
1329 | " | 1370 | } |
1330 | } | 1371 | } |
1331 | 1372 | ||
1332 | @Conference{PaulEMcKenney2006b | 1373 | @Conference{PaulEMcKenney2006b |
@@ -1341,9 +1382,9 @@ Suparna Bhattacharya" | |||
1341 | \url{http://www.linuxsymposium.org/2006/view_abstract.php?content_key=184} | 1382 | \url{http://www.linuxsymposium.org/2006/view_abstract.php?content_key=184} |
1342 | \url{http://www.rdrop.com/users/paulmck/RCU/OLSrtRCU.2006.08.11a.pdf} | 1383 | \url{http://www.rdrop.com/users/paulmck/RCU/OLSrtRCU.2006.08.11a.pdf} |
1343 | [Viewed January 1, 2007]" | 1384 | [Viewed January 1, 2007]" |
1344 | ,annotation=" | 1385 | ,annotation={ |
1345 | Described how to improve the -rt implementation of realtime RCU. | 1386 | Described how to improve the -rt implementation of realtime RCU. |
1346 | " | 1387 | } |
1347 | } | 1388 | } |
1348 | 1389 | ||
1349 | @unpublished{WikipediaRCU | 1390 | @unpublished{WikipediaRCU |
@@ -1354,12 +1395,11 @@ Canis Rufus and Zoicon5 and Anome and Hal Eisen" | |||
1354 | ,month="July" | 1395 | ,month="July" |
1355 | ,day="8" | 1396 | ,day="8" |
1356 | ,year="2006" | 1397 | ,year="2006" |
1357 | ,note="Available: | 1398 | ,note="\url{http://en.wikipedia.org/wiki/Read-copy-update}" |
1358 | \url{http://en.wikipedia.org/wiki/Read-copy-update} | 1399 | ,annotation={ |
1359 | [Viewed August 21, 2006]" | ||
1360 | ,annotation=" | ||
1361 | Wikipedia RCU page as of July 8 2006. | 1400 | Wikipedia RCU page as of July 8 2006. |
1362 | " | 1401 | [Viewed August 21, 2006] |
1402 | } | ||
1363 | } | 1403 | } |
1364 | 1404 | ||
1365 | @Conference{NickPiggin2006LocklessPageCache | 1405 | @Conference{NickPiggin2006LocklessPageCache |
@@ -1372,9 +1412,9 @@ Canis Rufus and Zoicon5 and Anome and Hal Eisen" | |||
1372 | ,note="Available: | 1412 | ,note="Available: |
1373 | \url{http://www.linuxsymposium.org/2006/view_abstract.php?content_key=184} | 1413 | \url{http://www.linuxsymposium.org/2006/view_abstract.php?content_key=184} |
1374 | [Viewed January 11, 2009]" | 1414 | [Viewed January 11, 2009]" |
1375 | ,annotation=" | 1415 | ,annotation={ |
1376 | Uses RCU-protected radix tree for a lockless page cache. | 1416 | Uses RCU-protected radix tree for a lockless page cache. |
1377 | " | 1417 | } |
1378 | } | 1418 | } |
1379 | 1419 | ||
1380 | @unpublished{PaulEMcKenney2006c | 1420 | @unpublished{PaulEMcKenney2006c |
@@ -1388,9 +1428,9 @@ Canis Rufus and Zoicon5 and Anome and Hal Eisen" | |||
1388 | Revised: | 1428 | Revised: |
1389 | \url{http://www.rdrop.com/users/paulmck/RCU/srcu.2007.01.14a.pdf} | 1429 | \url{http://www.rdrop.com/users/paulmck/RCU/srcu.2007.01.14a.pdf} |
1390 | [Viewed August 21, 2006]" | 1430 | [Viewed August 21, 2006]" |
1391 | ,annotation=" | 1431 | ,annotation={ |
1392 | LWN article introducing SRCU. | 1432 | LWN article introducing SRCU. |
1393 | " | 1433 | } |
1394 | } | 1434 | } |
1395 | 1435 | ||
1396 | @unpublished{RobertOlsson2006a | 1436 | @unpublished{RobertOlsson2006a |
@@ -1399,12 +1439,11 @@ Revised: | |||
1399 | ,month="August" | 1439 | ,month="August" |
1400 | ,day="18" | 1440 | ,day="18" |
1401 | ,year="2006" | 1441 | ,year="2006" |
1402 | ,note="Available: | 1442 | ,note="\url{http://www.nada.kth.se/~snilsson/publications/TRASH/trash.pdf}" |
1403 | \url{http://www.nada.kth.se/~snilsson/publications/TRASH/trash.pdf} | 1443 | ,annotation={ |
1404 | [Viewed March 4, 2011]" | ||
1405 | ,annotation=" | ||
1406 | RCU-protected dynamic trie-hash combination. | 1444 | RCU-protected dynamic trie-hash combination. |
1407 | " | 1445 | [Viewed March 4, 2011] |
1446 | } | ||
1408 | } | 1447 | } |
1409 | 1448 | ||
1410 | @unpublished{ChristophHellwig2006RCU2SRCU | 1449 | @unpublished{ChristophHellwig2006RCU2SRCU |
@@ -1426,10 +1465,10 @@ Revised: | |||
1426 | ,note="Available: | 1465 | ,note="Available: |
1427 | \url{http://www.rdrop.com/users/paulmck/RCU/linuxusage.html} | 1466 | \url{http://www.rdrop.com/users/paulmck/RCU/linuxusage.html} |
1428 | [Viewed January 14, 2007]" | 1467 | [Viewed January 14, 2007]" |
1429 | ,annotation=" | 1468 | ,annotation={ |
1430 | Paul McKenney's RCU page showing graphs plotting Linux-kernel | 1469 | Paul McKenney's RCU page showing graphs plotting Linux-kernel |
1431 | usage of RCU. | 1470 | usage of RCU. |
1432 | " | 1471 | } |
1433 | } | 1472 | } |
1434 | 1473 | ||
1435 | @unpublished{PaulEMcKenneyRCUusageRawDataPage | 1474 | @unpublished{PaulEMcKenneyRCUusageRawDataPage |
@@ -1440,10 +1479,10 @@ Revised: | |||
1440 | ,note="Available: | 1479 | ,note="Available: |
1441 | \url{http://www.rdrop.com/users/paulmck/RCU/linuxusage/rculocktab.html} | 1480 | \url{http://www.rdrop.com/users/paulmck/RCU/linuxusage/rculocktab.html} |
1442 | [Viewed January 14, 2007]" | 1481 | [Viewed January 14, 2007]" |
1443 | ,annotation=" | 1482 | ,annotation={ |
1444 | Paul McKenney's RCU page showing Linux usage of RCU in tabular | 1483 | Paul McKenney's RCU page showing Linux usage of RCU in tabular |
1445 | form, with links to corresponding cscope databases. | 1484 | form, with links to corresponding cscope databases. |
1446 | " | 1485 | } |
1447 | } | 1486 | } |
1448 | 1487 | ||
1449 | @unpublished{GauthamShenoy2006RCUrwlock | 1488 | @unpublished{GauthamShenoy2006RCUrwlock |
@@ -1455,13 +1494,13 @@ Revised: | |||
1455 | ,note="Available: | 1494 | ,note="Available: |
1456 | \url{http://lkml.org/lkml/2006/10/26/73} | 1495 | \url{http://lkml.org/lkml/2006/10/26/73} |
1457 | [Viewed January 26, 2009]" | 1496 | [Viewed January 26, 2009]" |
1458 | ,annotation=" | 1497 | ,annotation={ |
1459 | RCU-based reader-writer lock that allows readers to proceed with | 1498 | RCU-based reader-writer lock that allows readers to proceed with |
1460 | no memory barriers or atomic instruction in absence of writers. | 1499 | no memory barriers or atomic instruction in absence of writers. |
1461 | If writers do show up, readers must of course wait as required by | 1500 | If writers do show up, readers must of course wait as required by |
1462 | the semantics of reader-writer locking. This is a recursive | 1501 | the semantics of reader-writer locking. This is a recursive |
1463 | lock. | 1502 | lock. |
1464 | " | 1503 | } |
1465 | } | 1504 | } |
1466 | 1505 | ||
1467 | @unpublished{JensAxboe2006SlowSRCU | 1506 | @unpublished{JensAxboe2006SlowSRCU |
@@ -1474,11 +1513,11 @@ Revised: | |||
1474 | ,note="Available: | 1513 | ,note="Available: |
1475 | \url{http://lkml.org/lkml/2006/11/17/56} | 1514 | \url{http://lkml.org/lkml/2006/11/17/56} |
1476 | [Viewed May 28, 2007]" | 1515 | [Viewed May 28, 2007]" |
1477 | ,annotation=" | 1516 | ,annotation={ |
1478 | SRCU's grace periods are too slow for Jens, even after a | 1517 | SRCU's grace periods are too slow for Jens, even after a |
1479 | factor-of-three speedup. | 1518 | factor-of-three speedup. |
1480 | Sped-up version of SRCU at http://lkml.org/lkml/2006/11/17/359. | 1519 | Sped-up version of SRCU at http://lkml.org/lkml/2006/11/17/359. |
1481 | " | 1520 | } |
1482 | } | 1521 | } |
1483 | 1522 | ||
1484 | @unpublished{OlegNesterov2006QRCU | 1523 | @unpublished{OlegNesterov2006QRCU |
@@ -1491,10 +1530,10 @@ Revised: | |||
1491 | ,note="Available: | 1530 | ,note="Available: |
1492 | \url{http://lkml.org/lkml/2006/11/19/69} | 1531 | \url{http://lkml.org/lkml/2006/11/19/69} |
1493 | [Viewed May 28, 2007]" | 1532 | [Viewed May 28, 2007]" |
1494 | ,annotation=" | 1533 | ,annotation={ |
1495 | First cut of QRCU. Expanded/corrected versions followed. | 1534 | First cut of QRCU. Expanded/corrected versions followed. |
1496 | Used to be OlegNesterov2007QRCU, now time-corrected. | 1535 | Used to be OlegNesterov2007QRCU, now time-corrected. |
1497 | " | 1536 | } |
1498 | } | 1537 | } |
1499 | 1538 | ||
1500 | @unpublished{OlegNesterov2006aQRCU | 1539 | @unpublished{OlegNesterov2006aQRCU |
@@ -1506,10 +1545,10 @@ Revised: | |||
1506 | ,note="Available: | 1545 | ,note="Available: |
1507 | \url{http://lkml.org/lkml/2006/11/29/330} | 1546 | \url{http://lkml.org/lkml/2006/11/29/330} |
1508 | [Viewed November 26, 2008]" | 1547 | [Viewed November 26, 2008]" |
1509 | ,annotation=" | 1548 | ,annotation={ |
1510 | Expanded/corrected version of QRCU. | 1549 | Expanded/corrected version of QRCU. |
1511 | Used to be OlegNesterov2007aQRCU, now time-corrected. | 1550 | Used to be OlegNesterov2007aQRCU, now time-corrected. |
1512 | " | 1551 | } |
1513 | } | 1552 | } |
1514 | 1553 | ||
1515 | @unpublished{EvgeniyPolyakov2006RCUslowdown | 1554 | @unpublished{EvgeniyPolyakov2006RCUslowdown |
@@ -1521,10 +1560,10 @@ Revised: | |||
1521 | ,note="Available: | 1560 | ,note="Available: |
1522 | \url{http://www.ioremap.net/node/41} | 1561 | \url{http://www.ioremap.net/node/41} |
1523 | [Viewed October 28, 2008]" | 1562 | [Viewed October 28, 2008]" |
1524 | ,annotation=" | 1563 | ,annotation={ |
1525 | Using RCU as a pure delay leads to a 2.5x slowdown in skbs in | 1564 | Using RCU as a pure delay leads to a 2.5x slowdown in skbs in |
1526 | the Linux kernel. | 1565 | the Linux kernel. |
1527 | " | 1566 | } |
1528 | } | 1567 | } |
1529 | 1568 | ||
1530 | @inproceedings{ChrisMatthews2006ClusteredObjectsRCU | 1569 | @inproceedings{ChrisMatthews2006ClusteredObjectsRCU |
@@ -1541,7 +1580,8 @@ Revised: | |||
1541 | ,annotation={ | 1580 | ,annotation={ |
1542 | Uses K42's RCU-like functionality to manage clustered-object | 1581 | Uses K42's RCU-like functionality to manage clustered-object |
1543 | lifetimes. | 1582 | lifetimes. |
1544 | }} | 1583 | } |
1584 | } | ||
1545 | 1585 | ||
1546 | @article{DilmaDaSilva2006K42 | 1586 | @article{DilmaDaSilva2006K42 |
1547 | ,author = {Silva, Dilma Da and Krieger, Orran and Wisniewski, Robert W. and Waterland, Amos and Tam, David and Baumann, Andrew} | 1587 | ,author = {Silva, Dilma Da and Krieger, Orran and Wisniewski, Robert W. and Waterland, Amos and Tam, David and Baumann, Andrew} |
@@ -1557,7 +1597,8 @@ Revised: | |||
1557 | ,address = {New York, NY, USA} | 1597 | ,address = {New York, NY, USA} |
1558 | ,annotation={ | 1598 | ,annotation={ |
1559 | Describes relationship of K42 generations to RCU. | 1599 | Describes relationship of K42 generations to RCU. |
1560 | }} | 1600 | } |
1601 | } | ||
1561 | 1602 | ||
1562 | # CoreyMinyard2007list_splice_rcu | 1603 | # CoreyMinyard2007list_splice_rcu |
1563 | @unpublished{CoreyMinyard2007list:splice:rcu | 1604 | @unpublished{CoreyMinyard2007list:splice:rcu |
@@ -1569,9 +1610,9 @@ Revised: | |||
1569 | ,note="Available: | 1610 | ,note="Available: |
1570 | \url{http://lkml.org/lkml/2007/1/3/112} | 1611 | \url{http://lkml.org/lkml/2007/1/3/112} |
1571 | [Viewed May 28, 2007]" | 1612 | [Viewed May 28, 2007]" |
1572 | ,annotation=" | 1613 | ,annotation={ |
1573 | Patch for list_splice_rcu(). | 1614 | Patch for list_splice_rcu(). |
1574 | " | 1615 | } |
1575 | } | 1616 | } |
1576 | 1617 | ||
1577 | @unpublished{PaulEMcKenney2007rcubarrier | 1618 | @unpublished{PaulEMcKenney2007rcubarrier |
@@ -1583,9 +1624,9 @@ Revised: | |||
1583 | ,note="Available: | 1624 | ,note="Available: |
1584 | \url{http://lwn.net/Articles/217484/} | 1625 | \url{http://lwn.net/Articles/217484/} |
1585 | [Viewed November 22, 2007]" | 1626 | [Viewed November 22, 2007]" |
1586 | ,annotation=" | 1627 | ,annotation={ |
1587 | LWN article introducing the rcu_barrier() primitive. | 1628 | LWN article introducing the rcu_barrier() primitive. |
1588 | " | 1629 | } |
1589 | } | 1630 | } |
1590 | 1631 | ||
1591 | @unpublished{PeterZijlstra2007SyncBarrier | 1632 | @unpublished{PeterZijlstra2007SyncBarrier |
@@ -1597,10 +1638,10 @@ Revised: | |||
1597 | ,note="Available: | 1638 | ,note="Available: |
1598 | \url{http://lkml.org/lkml/2007/1/28/34} | 1639 | \url{http://lkml.org/lkml/2007/1/28/34} |
1599 | [Viewed March 27, 2008]" | 1640 | [Viewed March 27, 2008]" |
1600 | ,annotation=" | 1641 | ,annotation={ |
1601 | RCU-like implementation for frequent updaters and rare readers(!). | 1642 | RCU-like implementation for frequent updaters and rare readers(!). |
1602 | Subsumed into QRCU. Maybe... | 1643 | Subsumed into QRCU. Maybe... |
1603 | " | 1644 | } |
1604 | } | 1645 | } |
1605 | 1646 | ||
1606 | @unpublished{PaulEMcKenney2007BoostRCU | 1647 | @unpublished{PaulEMcKenney2007BoostRCU |
@@ -1609,14 +1650,13 @@ Revised: | |||
1609 | ,month="February" | 1650 | ,month="February" |
1610 | ,day="5" | 1651 | ,day="5" |
1611 | ,year="2007" | 1652 | ,year="2007" |
1612 | ,note="Available: | 1653 | ,note="\url{http://lwn.net/Articles/220677/}" |
1613 | \url{http://lwn.net/Articles/220677/} | 1654 | ,annotation={ |
1614 | Revised: | ||
1615 | \url{http://www.rdrop.com/users/paulmck/RCU/RCUbooststate.2007.04.16a.pdf} | ||
1616 | [Viewed September 7, 2007]" | ||
1617 | ,annotation=" | ||
1618 | LWN article introducing RCU priority boosting. | 1655 | LWN article introducing RCU priority boosting. |
1619 | " | 1656 | Revised: |
1657 | http://www.rdrop.com/users/paulmck/RCU/RCUbooststate.2007.04.16a.pdf | ||
1658 | [Viewed September 7, 2007] | ||
1659 | } | ||
1620 | } | 1660 | } |
1621 | 1661 | ||
1622 | @unpublished{PaulMcKenney2007QRCUpatch | 1662 | @unpublished{PaulMcKenney2007QRCUpatch |
@@ -1628,9 +1668,9 @@ Revised: | |||
1628 | ,note="Available: | 1668 | ,note="Available: |
1629 | \url{http://lkml.org/lkml/2007/2/25/18} | 1669 | \url{http://lkml.org/lkml/2007/2/25/18} |
1630 | [Viewed March 27, 2008]" | 1670 | [Viewed March 27, 2008]" |
1631 | ,annotation=" | 1671 | ,annotation={ |
1632 | Patch for QRCU supplying lock-free fast path. | 1672 | Patch for QRCU supplying lock-free fast path. |
1633 | " | 1673 | } |
1634 | } | 1674 | } |
1635 | 1675 | ||
1636 | @article{JonathanAppavoo2007K42RCU | 1676 | @article{JonathanAppavoo2007K42RCU |
@@ -1647,7 +1687,8 @@ Revised: | |||
1647 | ,address = {New York, NY, USA} | 1687 | ,address = {New York, NY, USA} |
1648 | ,annotation={ | 1688 | ,annotation={ |
1649 | Role of RCU in K42. | 1689 | Role of RCU in K42. |
1650 | }} | 1690 | } |
1691 | } | ||
1651 | 1692 | ||
1652 | @conference{RobertOlsson2007Trash | 1693 | @conference{RobertOlsson2007Trash |
1653 | ,Author="Robert Olsson and Stefan Nilsson" | 1694 | ,Author="Robert Olsson and Stefan Nilsson" |
@@ -1658,9 +1699,9 @@ Revised: | |||
1658 | ,note="Available: | 1699 | ,note="Available: |
1659 | \url{http://ieeexplore.ieee.org/xpl/freeabs_all.jsp?arnumber=4281239} | 1700 | \url{http://ieeexplore.ieee.org/xpl/freeabs_all.jsp?arnumber=4281239} |
1660 | [Viewed October 1, 2010]" | 1701 | [Viewed October 1, 2010]" |
1661 | ,annotation=" | 1702 | ,annotation={ |
1662 | RCU-protected dynamic trie-hash combination. | 1703 | RCU-protected dynamic trie-hash combination. |
1663 | " | 1704 | } |
1664 | } | 1705 | } |
1665 | 1706 | ||
1666 | @conference{PeterZijlstra2007ConcurrentPagecacheRCU | 1707 | @conference{PeterZijlstra2007ConcurrentPagecacheRCU |
@@ -1673,10 +1714,10 @@ Revised: | |||
1673 | ,note="Available: | 1714 | ,note="Available: |
1674 | \url{http://ols.108.redhat.com/2007/Reprints/zijlstra-Reprint.pdf} | 1715 | \url{http://ols.108.redhat.com/2007/Reprints/zijlstra-Reprint.pdf} |
1675 | [Viewed April 14, 2008]" | 1716 | [Viewed April 14, 2008]" |
1676 | ,annotation=" | 1717 | ,annotation={ |
1677 | Page-cache modifications permitting RCU readers and concurrent | 1718 | Page-cache modifications permitting RCU readers and concurrent |
1678 | updates. | 1719 | updates. |
1679 | " | 1720 | } |
1680 | } | 1721 | } |
1681 | 1722 | ||
1682 | @unpublished{PaulEMcKenney2007whatisRCU | 1723 | @unpublished{PaulEMcKenney2007whatisRCU |
@@ -1701,11 +1742,11 @@ Revised: | |||
1701 | ,note="Available: | 1742 | ,note="Available: |
1702 | \url{http://lwn.net/Articles/243851/} | 1743 | \url{http://lwn.net/Articles/243851/} |
1703 | [Viewed September 8, 2007]" | 1744 | [Viewed September 8, 2007]" |
1704 | ,annotation=" | 1745 | ,annotation={ |
1705 | LWN article describing Promela and spin, and also using Oleg | 1746 | LWN article describing Promela and spin, and also using Oleg |
1706 | Nesterov's QRCU as an example (with Paul McKenney's fastpath). | 1747 | Nesterov's QRCU as an example (with Paul McKenney's fastpath). |
1707 | Merged patch at: http://lkml.org/lkml/2007/2/25/18 | 1748 | Merged patch at: http://lkml.org/lkml/2007/2/25/18 |
1708 | " | 1749 | } |
1709 | } | 1750 | } |
1710 | 1751 | ||
1711 | @unpublished{PaulEMcKenney2007WG21DDOatomics | 1752 | @unpublished{PaulEMcKenney2007WG21DDOatomics |
@@ -1714,12 +1755,12 @@ Revised: | |||
1714 | ,month="August" | 1755 | ,month="August" |
1715 | ,day="3" | 1756 | ,day="3" |
1716 | ,year="2007" | 1757 | ,year="2007" |
1717 | ,note="Preprint: | 1758 | ,note="Available: |
1718 | \url{http://open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2664.htm} | 1759 | \url{http://open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2664.htm} |
1719 | [Viewed December 7, 2009]" | 1760 | [Viewed December 7, 2009]" |
1720 | ,annotation=" | 1761 | ,annotation={ |
1721 | RCU for C++, parts 1 and 2. | 1762 | RCU for C++, parts 1 and 2. |
1722 | " | 1763 | } |
1723 | } | 1764 | } |
1724 | 1765 | ||
1725 | @unpublished{PaulEMcKenney2007WG21DDOannotation | 1766 | @unpublished{PaulEMcKenney2007WG21DDOannotation |
@@ -1728,12 +1769,12 @@ Revised: | |||
1728 | ,month="September" | 1769 | ,month="September" |
1729 | ,day="18" | 1770 | ,day="18" |
1730 | ,year="2008" | 1771 | ,year="2008" |
1731 | ,note="Preprint: | 1772 | ,note="Available: |
1732 | \url{http://open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2782.htm} | 1773 | \url{http://open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2782.htm} |
1733 | [Viewed December 7, 2009]" | 1774 | [Viewed December 7, 2009]" |
1734 | ,annotation=" | 1775 | ,annotation={ |
1735 | RCU for C++, part 2, updated many times. | 1776 | RCU for C++, part 2, updated many times. |
1736 | " | 1777 | } |
1737 | } | 1778 | } |
1738 | 1779 | ||
1739 | @unpublished{PaulEMcKenney2007PreemptibleRCUPatch | 1780 | @unpublished{PaulEMcKenney2007PreemptibleRCUPatch |
@@ -1745,10 +1786,10 @@ Revised: | |||
1745 | ,note="Available: | 1786 | ,note="Available: |
1746 | \url{http://lkml.org/lkml/2007/9/10/213} | 1787 | \url{http://lkml.org/lkml/2007/9/10/213} |
1747 | [Viewed October 25, 2007]" | 1788 | [Viewed October 25, 2007]" |
1748 | ,annotation=" | 1789 | ,annotation={ |
1749 | Final patch for preemptable RCU to -rt. (Later patches were | 1790 | Final patch for preemptable RCU to -rt. (Later patches were |
1750 | to mainline, eventually incorporated.) | 1791 | to mainline, eventually incorporated.) |
1751 | " | 1792 | } |
1752 | } | 1793 | } |
1753 | 1794 | ||
1754 | @unpublished{PaulEMcKenney2007PreemptibleRCU | 1795 | @unpublished{PaulEMcKenney2007PreemptibleRCU |
@@ -1760,9 +1801,9 @@ Revised: | |||
1760 | ,note="Available: | 1801 | ,note="Available: |
1761 | \url{http://lwn.net/Articles/253651/} | 1802 | \url{http://lwn.net/Articles/253651/} |
1762 | [Viewed October 25, 2007]" | 1803 | [Viewed October 25, 2007]" |
1763 | ,annotation=" | 1804 | ,annotation={ |
1764 | LWN article describing the design of preemptible RCU. | 1805 | LWN article describing the design of preemptible RCU. |
1765 | " | 1806 | } |
1766 | } | 1807 | } |
1767 | 1808 | ||
1768 | @article{ThomasEHart2007a | 1809 | @article{ThomasEHart2007a |
@@ -1783,6 +1824,7 @@ Revised: | |||
1783 | } | 1824 | } |
1784 | } | 1825 | } |
1785 | 1826 | ||
1827 | # MathieuDesnoyers2007call_rcu_schedNeeded | ||
1786 | @unpublished{MathieuDesnoyers2007call:rcu:schedNeeded | 1828 | @unpublished{MathieuDesnoyers2007call:rcu:schedNeeded |
1787 | ,Author="Mathieu Desnoyers" | 1829 | ,Author="Mathieu Desnoyers" |
1788 | ,Title="Re: [patch 1/2] {Linux} Kernel Markers - Support Multiple Probes" | 1830 | ,Title="Re: [patch 1/2] {Linux} Kernel Markers - Support Multiple Probes" |
@@ -1792,9 +1834,9 @@ Revised: | |||
1792 | ,note="Available: | 1834 | ,note="Available: |
1793 | \url{http://lkml.org/lkml/2007/12/20/244} | 1835 | \url{http://lkml.org/lkml/2007/12/20/244} |
1794 | [Viewed March 27, 2008]" | 1836 | [Viewed March 27, 2008]" |
1795 | ,annotation=" | 1837 | ,annotation={ |
1796 | Request for call_rcu_sched() and rcu_barrier_sched(). | 1838 | Request for call_rcu_sched() and rcu_barrier_sched(). |
1797 | " | 1839 | } |
1798 | } | 1840 | } |
1799 | 1841 | ||
1800 | 1842 | ||
@@ -1815,11 +1857,11 @@ Revised: | |||
1815 | ,note="Available: | 1857 | ,note="Available: |
1816 | \url{http://lwn.net/Articles/262464/} | 1858 | \url{http://lwn.net/Articles/262464/} |
1817 | [Viewed December 27, 2007]" | 1859 | [Viewed December 27, 2007]" |
1818 | ,annotation=" | 1860 | ,annotation={ |
1819 | Lays out the three basic components of RCU: (1) publish-subscribe, | 1861 | Lays out the three basic components of RCU: (1) publish-subscribe, |
1820 | 	Lays out the three basic components of RCU: (1) publish-subscribe, | 1862 | 	Lays out the three basic components of RCU: (1) publish-subscribe, |
1820 | 	(2) wait for pre-existing readers to complete, and (3) maintain | 1862 | 	(2) wait for pre-existing readers to complete, and (3) maintain |
1821 | 	multiple versions. | 1863 | 	multiple versions. |
1822 | " | 1864 | } |
1823 | } | 1865 | } |
1824 | 1866 | ||
1825 | @unpublished{PaulEMcKenney2008WhatIsRCUUsage | 1867 | @unpublished{PaulEMcKenney2008WhatIsRCUUsage |
@@ -1831,7 +1873,7 @@ Revised: | |||
1831 | ,note="Available: | 1873 | ,note="Available: |
1832 | \url{http://lwn.net/Articles/263130/} | 1874 | \url{http://lwn.net/Articles/263130/} |
1833 | [Viewed January 4, 2008]" | 1875 | [Viewed January 4, 2008]" |
1834 | ,annotation=" | 1876 | ,annotation={ |
1835 | Lays out six uses of RCU: | 1877 | Lays out six uses of RCU: |
1836 | 1. RCU is a Reader-Writer Lock Replacement | 1878 | 1. RCU is a Reader-Writer Lock Replacement |
1837 | 2. RCU is a Restricted Reference-Counting Mechanism | 1879 | 2. RCU is a Restricted Reference-Counting Mechanism |
@@ -1839,7 +1881,7 @@ Revised: | |||
1839 | 4. RCU is a Poor Man's Garbage Collector | 1881 | 4. RCU is a Poor Man's Garbage Collector |
1840 | 5. RCU is a Way of Providing Existence Guarantees | 1882 | 5. RCU is a Way of Providing Existence Guarantees |
1841 | 6. RCU is a Way of Waiting for Things to Finish | 1883 | 6. RCU is a Way of Waiting for Things to Finish |
1842 | " | 1884 | } |
1843 | } | 1885 | } |
1844 | 1886 | ||
1845 | @unpublished{PaulEMcKenney2008WhatIsRCUAPI | 1887 | @unpublished{PaulEMcKenney2008WhatIsRCUAPI |
@@ -1851,10 +1893,10 @@ Revised: | |||
1851 | ,note="Available: | 1893 | ,note="Available: |
1852 | \url{http://lwn.net/Articles/264090/} | 1894 | \url{http://lwn.net/Articles/264090/} |
1853 | [Viewed January 10, 2008]" | 1895 | [Viewed January 10, 2008]" |
1854 | ,annotation=" | 1896 | ,annotation={ |
1855 | Gives an overview of the Linux-kernel RCU API and a brief annotated RCU | 1897 | Gives an overview of the Linux-kernel RCU API and a brief annotated RCU |
1856 | bibliography. | 1898 | bibliography. |
1857 | " | 1899 | } |
1858 | } | 1900 | } |
1859 | 1901 | ||
1860 | # | 1902 | # |
@@ -1872,10 +1914,10 @@ Revised: | |||
1872 | ,note="Available: | 1914 | ,note="Available: |
1873 | \url{http://lkml.org/lkml/2008/1/29/208} | 1915 | \url{http://lkml.org/lkml/2008/1/29/208} |
1874 | [Viewed March 27, 2008]" | 1916 | [Viewed March 27, 2008]" |
1875 | ,annotation=" | 1917 | ,annotation={ |
1876 | Patch that prevents preemptible RCU from unnecessarily waking | 1918 | Patch that prevents preemptible RCU from unnecessarily waking |
1877 | up dynticks-idle CPUs. | 1919 | up dynticks-idle CPUs. |
1878 | " | 1920 | } |
1879 | } | 1921 | } |
1880 | 1922 | ||
1881 | @unpublished{PaulEMcKenney2008LKMLDependencyOrdering | 1923 | @unpublished{PaulEMcKenney2008LKMLDependencyOrdering |
@@ -1887,9 +1929,9 @@ Revised: | |||
1887 | ,note="Available: | 1929 | ,note="Available: |
1888 | \url{http://lkml.org/lkml/2008/2/2/255} | 1930 | \url{http://lkml.org/lkml/2008/2/2/255} |
1889 | [Viewed October 18, 2008]" | 1931 | [Viewed October 18, 2008]" |
1890 | ,annotation=" | 1932 | ,annotation={ |
1891 | Explanation of compilers violating dependency ordering. | 1933 | Explanation of compilers violating dependency ordering. |
1892 | " | 1934 | } |
1893 | } | 1935 | } |
1894 | 1936 | ||
1895 | @Conference{PaulEMcKenney2008Beijing | 1937 | @Conference{PaulEMcKenney2008Beijing |
@@ -1916,24 +1958,26 @@ lot of {Linux} into your technology!!!" | |||
1916 | ,note="Available: | 1958 | ,note="Available: |
1917 | \url{http://lwn.net/Articles/279077/} | 1959 | \url{http://lwn.net/Articles/279077/} |
1918 | [Viewed April 24, 2008]" | 1960 | [Viewed April 24, 2008]" |
1919 | ,annotation=" | 1961 | ,annotation={ |
1920 | Describes use of Promela and Spin to validate (and fix!) the | 1962 | Describes use of Promela and Spin to validate (and fix!) the |
1921 | dynticks/RCU interface. | 1963 | dynticks/RCU interface. |
1922 | " | 1964 | } |
1923 | } | 1965 | } |
1924 | 1966 | ||
1925 | @article{DinakarGuniguntala2008IBMSysJ | 1967 | @article{DinakarGuniguntala2008IBMSysJ |
1926 | ,author="D. Guniguntala and P. E. McKenney and J. Triplett and J. Walpole" | 1968 | ,author="D. Guniguntala and P. E. McKenney and J. Triplett and J. Walpole" |
1927 | ,title="The read-copy-update mechanism for supporting real-time applications on shared-memory multiprocessor systems with {Linux}" | 1969 | ,title="The read-copy-update mechanism for supporting real-time applications on shared-memory multiprocessor systems with {Linux}" |
1928 | ,Year="2008" | 1970 | ,Year="2008" |
1929 | ,Month="April-June" | 1971 | ,Month="May" |
1930 | ,journal="IBM Systems Journal" | 1972 | ,journal="IBM Systems Journal" |
1931 | ,volume="47" | 1973 | ,volume="47" |
1932 | ,number="2" | 1974 | ,number="2" |
1933 | ,pages="221-236" | 1975 | ,pages="221-236" |
1934 | ,annotation=" | 1976 | ,annotation={ |
1935 | RCU, realtime RCU, sleepable RCU, performance. | 1977 | RCU, realtime RCU, sleepable RCU, performance. |
1936 | " | 1978 | http://www.research.ibm.com/journal/sj/472/guniguntala.pdf |
1979 | [Viewed April 24, 2008] | ||
1980 | } | ||
1937 | } | 1981 | } |
1938 | 1982 | ||
1939 | @unpublished{LaiJiangshan2008NewClassicAlgorithm | 1983 | @unpublished{LaiJiangshan2008NewClassicAlgorithm |
@@ -1945,11 +1989,11 @@ lot of {Linux} into your technology!!!" | |||
1945 | ,note="Available: | 1989 | ,note="Available: |
1946 | \url{http://lkml.org/lkml/2008/6/2/539} | 1990 | \url{http://lkml.org/lkml/2008/6/2/539} |
1947 | [Viewed December 10, 2008]" | 1991 | [Viewed December 10, 2008]" |
1948 | ,annotation=" | 1992 | ,annotation={ |
1949 | Updated RCU classic algorithm. Introduced multi-tailed list | 1993 | Updated RCU classic algorithm. Introduced multi-tailed list |
1950 | for RCU callbacks and also pulling common code into | 1994 | for RCU callbacks and also pulling common code into |
1951 | __call_rcu(). | 1995 | __call_rcu(). |
1952 | " | 1996 | } |
1953 | } | 1997 | } |
1954 | 1998 | ||
1955 | @article{PaulEMcKenney2008RCUOSR | 1999 | @article{PaulEMcKenney2008RCUOSR |
@@ -1966,6 +2010,7 @@ lot of {Linux} into your technology!!!" | |||
1966 | ,address="New York, NY, USA" | 2010 | ,address="New York, NY, USA" |
1967 | ,annotation={ | 2011 | ,annotation={ |
1968 | Linux changed RCU to a far greater degree than RCU has changed Linux. | 2012 | Linux changed RCU to a far greater degree than RCU has changed Linux. |
2013 | http://portal.acm.org/citation.cfm?doid=1400097.1400099 | ||
1969 | } | 2014 | } |
1970 | } | 2015 | } |
1971 | 2016 | ||
@@ -1978,10 +2023,10 @@ lot of {Linux} into your technology!!!" | |||
1978 | ,note="Available: | 2023 | ,note="Available: |
1979 | \url{http://lkml.org/lkml/2008/8/21/336} | 2024 | \url{http://lkml.org/lkml/2008/8/21/336} |
1980 | [Viewed December 8, 2008]" | 2025 | [Viewed December 8, 2008]" |
1981 | ,annotation=" | 2026 | ,annotation={ |
1982 | State-based RCU. One key thing that this patch does is to | 2027 | State-based RCU. One key thing that this patch does is to |
1983 | separate the dynticks handling of NMIs and IRQs. | 2028 | separate the dynticks handling of NMIs and IRQs. |
1984 | " | 2029 | } |
1985 | } | 2030 | } |
1986 | 2031 | ||
1987 | @unpublished{ManfredSpraul2008dyntickIRQNMI | 2032 | @unpublished{ManfredSpraul2008dyntickIRQNMI |
@@ -1993,12 +2038,13 @@ lot of {Linux} into your technology!!!" | |||
1993 | ,note="Available: | 2038 | ,note="Available: |
1994 | \url{http://lkml.org/lkml/2008/9/6/86} | 2039 | \url{http://lkml.org/lkml/2008/9/6/86} |
1995 | [Viewed December 8, 2008]" | 2040 | [Viewed December 8, 2008]" |
1996 | ,annotation=" | 2041 | ,annotation={ |
1997 | Manfred notes a fix required to my attempt to separate irq | 2042 | Manfred notes a fix required to my attempt to separate irq |
1998 | and NMI processing for hierarchical RCU's dynticks interface. | 2043 | and NMI processing for hierarchical RCU's dynticks interface. |
1999 | " | 2044 | } |
2000 | } | 2045 | } |
2001 | 2046 | ||
2047 | # Was PaulEMcKenney2011cyclicRCU | ||
2002 | @techreport{PaulEMcKenney2008cyclicRCU | 2048 | @techreport{PaulEMcKenney2008cyclicRCU |
2003 | ,author="Paul E. McKenney" | 2049 | ,author="Paul E. McKenney" |
2004 | ,title="Efficient Support of Consistent Cyclic Search With Read-Copy Update" | 2050 | ,title="Efficient Support of Consistent Cyclic Search With Read-Copy Update" |
@@ -2008,11 +2054,11 @@ lot of {Linux} into your technology!!!" | |||
2008 | ,number="US Patent 7,426,511" | 2054 | ,number="US Patent 7,426,511" |
2009 | ,month="September" | 2055 | ,month="September" |
2010 | ,pages="23" | 2056 | ,pages="23" |
2011 | ,annotation=" | 2057 | ,annotation={ |
2012 | Maintains an additional level of indirection to allow | 2058 | Maintains an additional level of indirection to allow |
2013 | readers to confine themselves to the desired snapshot of the | 2059 | readers to confine themselves to the desired snapshot of the |
2014 | data structure. Only permits one update at a time. | 2060 | data structure. Only permits one update at a time. |
2015 | " | 2061 | } |
2016 | } | 2062 | } |
2017 | 2063 | ||
2018 | @unpublished{PaulEMcKenney2008HierarchicalRCU | 2064 | @unpublished{PaulEMcKenney2008HierarchicalRCU |
@@ -2021,13 +2067,12 @@ lot of {Linux} into your technology!!!" | |||
2021 | ,month="November" | 2067 | ,month="November" |
2022 | ,day="3" | 2068 | ,day="3" |
2023 | ,year="2008" | 2069 | ,year="2008" |
2024 | ,note="Available: | 2070 | ,note="\url{http://lwn.net/Articles/305782/}" |
2025 | \url{http://lwn.net/Articles/305782/} | 2071 | ,annotation={ |
2026 | [Viewed November 6, 2008]" | ||
2027 | ,annotation=" | ||
2028 | RCU with combining-tree-based grace-period detection, | 2072 | RCU with combining-tree-based grace-period detection, |
2029 | permitting it to handle thousands of CPUs. | 2073 | permitting it to handle thousands of CPUs. |
2030 | " | 2074 | [Viewed November 6, 2008] |
2075 | } | ||
2031 | } | 2076 | } |
2032 | 2077 | ||
2033 | @unpublished{PaulEMcKenney2009BloatwatchRCU | 2078 | @unpublished{PaulEMcKenney2009BloatwatchRCU |
@@ -2039,10 +2084,10 @@ lot of {Linux} into your technology!!!" | |||
2039 | ,note="Available: | 2084 | ,note="Available: |
2040 | \url{http://lkml.org/lkml/2009/1/14/449} | 2085 | \url{http://lkml.org/lkml/2009/1/14/449} |
2041 | [Viewed January 15, 2009]" | 2086 | [Viewed January 15, 2009]" |
2042 | ,annotation=" | 2087 | ,annotation={ |
2043 | Small-footprint implementation of RCU for uniprocessor | 2088 | Small-footprint implementation of RCU for uniprocessor |
2044 | embedded applications -- and also for exposition purposes. | 2089 | embedded applications -- and also for exposition purposes. |
2045 | " | 2090 | } |
2046 | } | 2091 | } |
2047 | 2092 | ||
2048 | @conference{PaulEMcKenney2009MaliciousURCU | 2093 | @conference{PaulEMcKenney2009MaliciousURCU |
@@ -2055,9 +2100,9 @@ lot of {Linux} into your technology!!!" | |||
2055 | ,note="Available: | 2100 | ,note="Available: |
2056 | \url{http://www.rdrop.com/users/paulmck/RCU/urcutorture.2009.01.22a.pdf} | 2101 | \url{http://www.rdrop.com/users/paulmck/RCU/urcutorture.2009.01.22a.pdf} |
2057 | [Viewed February 2, 2009]" | 2102 | [Viewed February 2, 2009]" |
2058 | ,annotation=" | 2103 | ,annotation={ |
2059 | Realtime RCU and torture-testing RCU uses. | 2104 | Realtime RCU and torture-testing RCU uses. |
2060 | " | 2105 | } |
2061 | } | 2106 | } |
2062 | 2107 | ||
2063 | @unpublished{MathieuDesnoyers2009URCU | 2108 | @unpublished{MathieuDesnoyers2009URCU |
@@ -2066,16 +2111,14 @@ lot of {Linux} into your technology!!!" | |||
2066 | ,month="February" | 2111 | ,month="February" |
2067 | ,day="5" | 2112 | ,day="5" |
2068 | ,year="2009" | 2113 | ,year="2009" |
2069 | ,note="Available: | 2114 | ,note="\url{http://lttng.org/urcu}" |
2070 | \url{http://lkml.org/lkml/2009/2/5/572} | 2115 | ,annotation={ |
2071 | \url{http://lttng.org/urcu} | ||
2072 | [Viewed February 20, 2009]" | ||
2073 | ,annotation=" | ||
2074 | Mathieu Desnoyers's user-space RCU implementation. | 2116 | Mathieu Desnoyers's user-space RCU implementation. |
2075 | git://lttng.org/userspace-rcu.git | 2117 | git://lttng.org/userspace-rcu.git |
2076 | http://lttng.org/cgi-bin/gitweb.cgi?p=userspace-rcu.git | 2118 | http://lttng.org/cgi-bin/gitweb.cgi?p=userspace-rcu.git |
2077 | http://lttng.org/urcu | 2119 | http://lttng.org/urcu |
2078 | " | 2120 | http://lkml.org/lkml/2009/2/5/572 |
2121 | } | ||
2079 | } | 2122 | } |
2080 | 2123 | ||
2081 | @unpublished{PaulEMcKenney2009LWNBloatWatchRCU | 2124 | @unpublished{PaulEMcKenney2009LWNBloatWatchRCU |
@@ -2087,9 +2130,24 @@ lot of {Linux} into your technology!!!" | |||
2087 | ,note="Available: | 2130 | ,note="Available: |
2088 | \url{http://lwn.net/Articles/323929/} | 2131 | \url{http://lwn.net/Articles/323929/} |
2089 | [Viewed March 20, 2009]" | 2132 | [Viewed March 20, 2009]" |
2090 | ,annotation=" | 2133 | ,annotation={ |
2091 | Uniprocessor assumptions allow simplified RCU implementation. | 2134 | Uniprocessor assumptions allow simplified RCU implementation. |
2092 | " | 2135 | } |
2136 | } | ||
2137 | |||
2138 | @unpublished{EvgeniyPolyakov2009EllipticsNetwork | ||
2139 | ,Author="Evgeniy Polyakov" | ||
2140 | ,Title="The Elliptics Network" | ||
2141 | ,month="April" | ||
2142 | ,day="17" | ||
2143 | ,year="2009" | ||
2144 | ,note="Available: | ||
2145 | \url{http://www.ioremap.net/projects/elliptics} | ||
2146 | [Viewed April 30, 2009]" | ||
2147 | ,annotation={ | ||
2148 | Distributed hash table with transactions, using elliptic | ||
2149 | hash functions to distribute data. | ||
2150 | } | ||
2093 | } | 2151 | } |
2094 | 2152 | ||
2095 | @unpublished{PaulEMcKenney2009expeditedRCU | 2153 | @unpublished{PaulEMcKenney2009expeditedRCU |
@@ -2101,9 +2159,9 @@ lot of {Linux} into your technology!!!" | |||
2101 | ,note="Available: | 2159 | ,note="Available: |
2102 | \url{http://lkml.org/lkml/2009/6/25/306} | 2160 | \url{http://lkml.org/lkml/2009/6/25/306} |
2103 | [Viewed August 16, 2009]" | 2161 | [Viewed August 16, 2009]" |
2104 | ,annotation=" | 2162 | ,annotation={ |
2105 | First posting of expedited RCU to be accepted into -tip. | 2163 | First posting of expedited RCU to be accepted into -tip. |
2106 | " | 2164 | } |
2107 | } | 2165 | } |
2108 | 2166 | ||
2109 | @unpublished{PaulEMcKenney2009fastRTRCU | 2167 | @unpublished{PaulEMcKenney2009fastRTRCU |
@@ -2115,21 +2173,21 @@ lot of {Linux} into your technology!!!" | |||
2115 | ,note="Available: | 2173 | ,note="Available: |
2116 | \url{http://lkml.org/lkml/2009/7/23/294} | 2174 | \url{http://lkml.org/lkml/2009/7/23/294} |
2117 | [Viewed August 15, 2009]" | 2175 | [Viewed August 15, 2009]" |
2118 | ,annotation=" | 2176 | ,annotation={ |
2119 | First posting of simple and fast preemptable RCU. | 2177 | First posting of simple and fast preemptable RCU. |
2120 | " | 2178 | } |
2121 | } | 2179 | } |
2122 | 2180 | ||
2123 | @InProceedings{JoshTriplett2009RPHash | 2181 | @unpublished{JoshTriplett2009RPHash |
2124 | ,Author="Josh Triplett" | 2182 | ,Author="Josh Triplett" |
2125 | ,Title="Scalable concurrent hash tables via relativistic programming" | 2183 | ,Title="Scalable concurrent hash tables via relativistic programming" |
2126 | ,month="September" | 2184 | ,month="September" |
2127 | ,year="2009" | 2185 | ,year="2009" |
2128 | ,booktitle="Linux Plumbers Conference 2009" | 2186 | ,note="Linux Plumbers Conference presentation" |
2129 | ,annotation=" | 2187 | ,annotation={ |
2130 | RP fun with hash tables. | 2188 | RP fun with hash tables. |
2131 | See also JoshTriplett2010RPHash | 2189 | Superseded by JoshTriplett2010RPHash |
2132 | " | 2190 | } |
2133 | } | 2191 | } |
2134 | 2192 | ||
2135 | @phdthesis{MathieuDesnoyersPhD | 2193 | @phdthesis{MathieuDesnoyersPhD |
@@ -2154,9 +2212,9 @@ lot of {Linux} into your technology!!!" | |||
2154 | ,note="Available: | 2212 | ,note="Available: |
2155 | \url{http://wiki.cs.pdx.edu/rp/} | 2213 | \url{http://wiki.cs.pdx.edu/rp/} |
2156 | [Viewed December 9, 2009]" | 2214 | [Viewed December 9, 2009]" |
2157 | ,annotation=" | 2215 | ,annotation={ |
2158 | Main Relativistic Programming Wiki. | 2216 | Main Relativistic Programming Wiki. |
2159 | " | 2217 | } |
2160 | } | 2218 | } |
2161 | 2219 | ||
2162 | @conference{PaulEMcKenney2009DeterministicRCU | 2220 | @conference{PaulEMcKenney2009DeterministicRCU |
@@ -2180,9 +2238,9 @@ lot of {Linux} into your technology!!!" | |||
2180 | ,note="Available: | 2238 | ,note="Available: |
2181 | \url{http://paulmck.livejournal.com/14639.html} | 2239 | \url{http://paulmck.livejournal.com/14639.html} |
2182 | [Viewed June 4, 2010]" | 2240 | [Viewed June 4, 2010]" |
2183 | ,annotation=" | 2241 | ,annotation={ |
2184 | Day-one bug in Tree RCU that took forever to track down. | 2242 | Day-one bug in Tree RCU that took forever to track down. |
2185 | " | 2243 | } |
2186 | } | 2244 | } |
2187 | 2245 | ||
2188 | @unpublished{MathieuDesnoyers2009defer:rcu | 2246 | @unpublished{MathieuDesnoyers2009defer:rcu |
@@ -2193,10 +2251,10 @@ lot of {Linux} into your technology!!!" | |||
2193 | ,note="Available: | 2251 | ,note="Available: |
2194 | \url{http://lkml.org/lkml/2009/10/18/129} | 2252 | \url{http://lkml.org/lkml/2009/10/18/129} |
2195 | [Viewed December 29, 2009]" | 2253 | [Viewed December 29, 2009]" |
2196 | ,annotation=" | 2254 | ,annotation={ |
2197 | Mathieu proposed defer_rcu() with fixed-size per-thread pool | 2255 | Mathieu proposed defer_rcu() with fixed-size per-thread pool |
2198 | of RCU callbacks. | 2256 | of RCU callbacks. |
2199 | " | 2257 | } |
2200 | } | 2258 | } |
2201 | 2259 | ||
2202 | @unpublished{MathieuDesnoyers2009VerifPrePub | 2260 | @unpublished{MathieuDesnoyers2009VerifPrePub |
@@ -2205,10 +2263,10 @@ lot of {Linux} into your technology!!!" | |||
2205 | ,month="December" | 2263 | ,month="December" |
2206 | ,year="2009" | 2264 | ,year="2009" |
2207 | ,note="Submitted to IEEE TPDS" | 2265 | ,note="Submitted to IEEE TPDS" |
2208 | ,annotation=" | 2266 | ,annotation={ |
2209 | OOMem model for Mathieu's user-level RCU mechanical proof of | 2267 | OOMem model for Mathieu's user-level RCU mechanical proof of |
2210 | correctness. | 2268 | correctness. |
2211 | " | 2269 | } |
2212 | } | 2270 | } |
2213 | 2271 | ||
2214 | @unpublished{MathieuDesnoyers2009URCUPrePub | 2272 | @unpublished{MathieuDesnoyers2009URCUPrePub |
@@ -2216,15 +2274,15 @@ lot of {Linux} into your technology!!!" | |||
2216 | ,Title="User-Level Implementations of Read-Copy Update" | 2274 | ,Title="User-Level Implementations of Read-Copy Update" |
2217 | ,month="December" | 2275 | ,month="December" |
2218 | ,year="2010" | 2276 | ,year="2010" |
2219 | ,url=\url{http://www.computer.org/csdl/trans/td/2012/02/ttd2012020375-abs.html} | 2277 | ,url={\url{http://www.computer.org/csdl/trans/td/2012/02/ttd2012020375-abs.html}} |
2220 | ,annotation=" | 2278 | ,annotation={ |
2221 | RCU overview, desiderata, semi-formal semantics, user-level RCU | 2279 | RCU overview, desiderata, semi-formal semantics, user-level RCU |
2222 | usage scenarios, three classes of RCU implementation, wait-free | 2280 | usage scenarios, three classes of RCU implementation, wait-free |
2223 | RCU updates, RCU grace-period batching, update overhead, | 2281 | RCU updates, RCU grace-period batching, update overhead, |
2224 | http://www.rdrop.com/users/paulmck/RCU/urcu-main-accepted.2011.08.30a.pdf | 2282 | http://www.rdrop.com/users/paulmck/RCU/urcu-main-accepted.2011.08.30a.pdf |
2225 | http://www.rdrop.com/users/paulmck/RCU/urcu-supp-accepted.2011.08.30a.pdf | 2283 | http://www.rdrop.com/users/paulmck/RCU/urcu-supp-accepted.2011.08.30a.pdf |
2226 | Superseded by MathieuDesnoyers2012URCU. | 2284 | Superseded by MathieuDesnoyers2012URCU. |
2227 | " | 2285 | } |
2228 | } | 2286 | } |
2229 | 2287 | ||
2230 | @inproceedings{HariKannan2009DynamicAnalysisRCU | 2288 | @inproceedings{HariKannan2009DynamicAnalysisRCU |
@@ -2240,7 +2298,8 @@ lot of {Linux} into your technology!!!" | |||
2240 | ,address = {New York, NY, USA} | 2298 | ,address = {New York, NY, USA} |
2241 | ,annotation={ | 2299 | ,annotation={ |
2242 | Uses RCU to protect metadata used in dynamic analysis. | 2300 | Uses RCU to protect metadata used in dynamic analysis. |
2243 | }} | 2301 | } |
2302 | } | ||
2244 | 2303 | ||
2245 | @conference{PaulEMcKenney2010SimpleOptRCU | 2304 | @conference{PaulEMcKenney2010SimpleOptRCU |
2246 | ,Author="Paul E. McKenney" | 2305 | ,Author="Paul E. McKenney" |
@@ -2252,10 +2311,10 @@ lot of {Linux} into your technology!!!" | |||
2252 | ,note="Available: | 2311 | ,note="Available: |
2253 | \url{http://www.rdrop.com/users/paulmck/RCU/SimplicityThruOptimization.2010.01.21f.pdf} | 2312 | \url{http://www.rdrop.com/users/paulmck/RCU/SimplicityThruOptimization.2010.01.21f.pdf} |
2254 | [Viewed October 10, 2010]" | 2313 | [Viewed October 10, 2010]" |
2255 | ,annotation=" | 2314 | ,annotation={ |
2256 | TREE_PREEMPT_RCU optimizations greatly simplified the old | 2315 | TREE_PREEMPT_RCU optimizations greatly simplified the old |
2257 | PREEMPT_RCU implementation. | 2316 | PREEMPT_RCU implementation. |
2258 | " | 2317 | } |
2259 | } | 2318 | } |
2260 | 2319 | ||
2261 | @unpublished{PaulEMcKenney2010LockdepRCU | 2320 | @unpublished{PaulEMcKenney2010LockdepRCU |
@@ -2264,12 +2323,11 @@ lot of {Linux} into your technology!!!" | |||
2264 | ,month="February" | 2323 | ,month="February" |
2265 | ,year="2010" | 2324 | ,year="2010" |
2266 | ,day="1" | 2325 | ,day="1" |
2267 | ,note="Available: | 2326 | ,note="\url{https://lwn.net/Articles/371986/}" |
2268 | \url{https://lwn.net/Articles/371986/} | 2327 | ,annotation={ |
2269 | [Viewed June 4, 2010]" | ||
2270 | ,annotation=" | ||
2271 | CONFIG_PROVE_RCU, or at least an early version. | 2328 | CONFIG_PROVE_RCU, or at least an early version. |
2272 | " | 2329 | [Viewed June 4, 2010] |
2330 | } | ||
2273 | } | 2331 | } |
2274 | 2332 | ||
2275 | @unpublished{AviKivity2010KVM2RCU | 2333 | @unpublished{AviKivity2010KVM2RCU |
@@ -2280,10 +2338,10 @@ lot of {Linux} into your technology!!!" | |||
2280 | ,note="Available: | 2338 | ,note="Available: |
2281 | \url{http://www.mail-archive.com/kvm@vger.kernel.org/msg28640.html} | 2339 | \url{http://www.mail-archive.com/kvm@vger.kernel.org/msg28640.html} |
2282 | [Viewed March 20, 2010]" | 2340 | [Viewed March 20, 2010]" |
2283 | ,annotation=" | 2341 | ,annotation={ |
2284 | Use of RCU permits KVM to increase the size of guest OSes from | 2342 | Use of RCU permits KVM to increase the size of guest OSes from |
2285 | 16 CPUs to 64 CPUs. | 2343 | 16 CPUs to 64 CPUs. |
2286 | " | 2344 | } |
2287 | } | 2345 | } |
2288 | 2346 | ||
2289 | @unpublished{HerbertXu2010RCUResizeHash | 2347 | @unpublished{HerbertXu2010RCUResizeHash |
@@ -2297,7 +2355,19 @@ lot of {Linux} into your technology!!!" | |||
2297 | ,annotation={ | 2355 | ,annotation={ |
2298 | Use a pair of list_head structures to support RCU-protected | 2356 | Use a pair of list_head structures to support RCU-protected |
2299 | resizable hash tables. | 2357 | resizable hash tables. |
2300 | }} | 2358 | } |
2359 | } | ||
2360 | |||
2361 | @mastersthesis{AbhinavDuggal2010Masters | ||
2362 | ,author="Abhinav Duggal" | ||
2363 | ,title="Stopping Data Races Using Redflag" | ||
2364 | ,school="Stony Brook University" | ||
2365 | ,year="2010" | ||
2366 | ,annotation={ | ||
2367 | Data-race detector incorporating RCU. | ||
2368 | http://www.filesystems.org/docs/abhinav-thesis/abhinav_thesis.pdf | ||
2369 | } | ||
2370 | } | ||
2301 | 2371 | ||
2302 | @article{JoshTriplett2010RPHash | 2372 | @article{JoshTriplett2010RPHash |
2303 | ,author="Josh Triplett and Paul E. McKenney and Jonathan Walpole" | 2373 | ,author="Josh Triplett and Paul E. McKenney and Jonathan Walpole" |
@@ -2310,7 +2380,8 @@ lot of {Linux} into your technology!!!" | |||
2310 | ,annotation={ | 2380 | ,annotation={ |
2311 | RP fun with hash tables. | 2381 | RP fun with hash tables. |
2312 | http://portal.acm.org/citation.cfm?id=1842733.1842750 | 2382 | http://portal.acm.org/citation.cfm?id=1842733.1842750 |
2313 | }} | 2383 | } |
2384 | } | ||
2314 | 2385 | ||
2315 | @unpublished{PaulEMcKenney2010RCUAPI | 2386 | @unpublished{PaulEMcKenney2010RCUAPI |
2316 | ,Author="Paul E. McKenney" | 2387 | ,Author="Paul E. McKenney" |
@@ -2318,12 +2389,11 @@ lot of {Linux} into your technology!!!" | |||
2318 | ,month="December" | 2389 | ,month="December" |
2319 | ,day="8" | 2390 | ,day="8" |
2320 | ,year="2010" | 2391 | ,year="2010" |
2321 | ,note="Available: | 2392 | ,note="\url{http://lwn.net/Articles/418853/}" |
2322 | \url{http://lwn.net/Articles/418853/} | 2393 | ,annotation={ |
2323 | [Viewed December 8, 2010]" | ||
2324 | ,annotation=" | ||
2325 | Includes updated software-engineering features. | 2394 | Includes updated software-engineering features. |
2326 | " | 2395 | [Viewed December 8, 2010] |
2396 | } | ||
2327 | } | 2397 | } |
2328 | 2398 | ||
2329 | @mastersthesis{AndrejPodzimek2010masters | 2399 | @mastersthesis{AndrejPodzimek2010masters |
@@ -2338,7 +2408,8 @@ lot of {Linux} into your technology!!!" | |||
2338 | Reviews RCU implementations and creates a few for OpenSolaris. | 2408 | Reviews RCU implementations and creates a few for OpenSolaris. |
2339 | Drives quiescent-state detection from RCU read-side primitives, | 2409 | Drives quiescent-state detection from RCU read-side primitives, |
2340 | in a manner roughly similar to that of Jim Houston. | 2410 | in a manner roughly similar to that of Jim Houston. |
2341 | }} | 2411 | } |
2412 | } | ||
2342 | 2413 | ||
2343 | @unpublished{LinusTorvalds2011Linux2:6:38:rc1:NPigginVFS | 2414 | @unpublished{LinusTorvalds2011Linux2:6:38:rc1:NPigginVFS |
2344 | ,Author="Linus Torvalds" | 2415 | ,Author="Linus Torvalds" |
@@ -2358,7 +2429,8 @@ lot of {Linux} into your technology!!!" | |||
2358 | of the most expensive parts of path component lookup, which was the | 2429 | of the most expensive parts of path component lookup, which was the |
2359 | d_lock on every component lookup. So I'm seeing improvements of 30-50% | 2430 | d_lock on every component lookup. So I'm seeing improvements of 30-50% |
2360 | on some seriously pathname-lookup intensive loads." | 2431 | on some seriously pathname-lookup intensive loads." |
2361 | }} | 2432 | } |
2433 | } | ||
2362 | 2434 | ||
2363 | @techreport{JoshTriplett2011RPScalableCorrectOrdering | 2435 | @techreport{JoshTriplett2011RPScalableCorrectOrdering |
2364 | ,author = {Josh Triplett and Philip W. Howard and Paul E. McKenney and Jonathan Walpole} | 2436 | ,author = {Josh Triplett and Philip W. Howard and Paul E. McKenney and Jonathan Walpole} |
@@ -2392,12 +2464,12 @@ lot of {Linux} into your technology!!!" | |||
2392 | ,number="US Patent 7,953,778" | 2464 | ,number="US Patent 7,953,778" |
2393 | ,month="May" | 2465 | ,month="May" |
2394 | ,pages="34" | 2466 | ,pages="34" |
2395 | ,annotation=" | 2467 | ,annotation={ |
2396 | Maintains an array of generation numbers to track in-flight | 2468 | Maintains an array of generation numbers to track in-flight |
2397 | updates and keeps an additional level of indirection to allow | 2469 | updates and keeps an additional level of indirection to allow |
2398 | readers to confine themselves to the desired snapshot of the | 2470 | readers to confine themselves to the desired snapshot of the |
2399 | data structure. | 2471 | data structure. |
2400 | " | 2472 | } |
2401 | } | 2473 | } |
2402 | 2474 | ||
2403 | @inproceedings{Triplett:2011:RPHash | 2475 | @inproceedings{Triplett:2011:RPHash |
@@ -2408,7 +2480,7 @@ lot of {Linux} into your technology!!!" | |||
2408 | ,year = {2011} | 2480 | ,year = {2011} |
2409 | ,pages = {145--158} | 2481 | ,pages = {145--158} |
2410 | ,numpages = {14} | 2482 | ,numpages = {14} |
2411 | ,url={http://www.usenix.org/event/atc11/tech/final_files/atc11_proceedings.pdf} | 2483 | ,url={http://www.usenix.org/event/atc11/tech/final_files/Triplett.pdf} |
2412 | ,publisher = {The USENIX Association} | 2484 | ,publisher = {The USENIX Association} |
2413 | ,address = {Portland, OR USA} | 2485 | ,address = {Portland, OR USA} |
2414 | } | 2486 | } |
@@ -2419,27 +2491,58 @@ lot of {Linux} into your technology!!!" | |||
2419 | ,month="July" | 2491 | ,month="July" |
2420 | ,day="27" | 2492 | ,day="27" |
2421 | ,year="2011" | 2493 | ,year="2011" |
2422 | ,note="Available: | 2494 | ,note="\url{http://lwn.net/Articles/453002/}" |
2423 | \url{http://lwn.net/Articles/453002/} | 2495 | ,annotation={ |
2424 | [Viewed July 27, 2011]" | ||
2425 | ,annotation=" | ||
2426 | Analysis of the RCU trainwreck in Linux kernel 3.0. | 2496 | Analysis of the RCU trainwreck in Linux kernel 3.0. |
2427 | " | 2497 | [Viewed July 27, 2011] |
2498 | } | ||
2428 | } | 2499 | } |
2429 | 2500 | ||
2430 | @unpublished{NeilBrown2011MeetTheLockers | 2501 | @unpublished{NeilBrown2011MeetTheLockers |
2431 | ,Author="Neil Brown" | 2502 | ,Author="Neil Brown" |
2432 | ,Title="Meet the Lockers" | 2503 | ,Title="Meet the {Lockers}" |
2433 | ,month="August" | 2504 | ,month="August" |
2434 | ,day="3" | 2505 | ,day="3" |
2435 | ,year="2011" | 2506 | ,year="2011" |
2436 | ,note="Available: | 2507 | ,note="Available: |
2437 | \url{http://lwn.net/Articles/453685/} | 2508 | \url{http://lwn.net/Articles/453685/} |
2438 | [Viewed September 2, 2011]" | 2509 | [Viewed September 2, 2011]" |
2439 | ,annotation=" | 2510 | ,annotation={ |
2440 | The Locker family as an analogy for locking, reference counting, | 2511 | The Locker family as an analogy for locking, reference counting, |
2441 | RCU, and seqlock. | 2512 | RCU, and seqlock. |
2442 | " | 2513 | } |
2514 | } | ||
2515 | |||
2516 | @inproceedings{Seyster:2011:RFA:2075416.2075425 | ||
2517 | ,author = {Seyster, Justin and Radhakrishnan, Prabakar and Katoch, Samriti and Duggal, Abhinav and Stoller, Scott D. and Zadok, Erez} | ||
2518 | ,title = {Redflag: a framework for analysis of Kernel-level concurrency} | ||
2519 | ,booktitle = {Proceedings of the 11th international conference on Algorithms and architectures for parallel processing - Volume Part I} | ||
2520 | ,series = {ICA3PP'11} | ||
2521 | ,year = {2011} | ||
2522 | ,isbn = {978-3-642-24649-4} | ||
2523 | ,location = {Melbourne, Australia} | ||
2524 | ,pages = {66--79} | ||
2525 | ,numpages = {14} | ||
2526 | ,url = {http://dl.acm.org/citation.cfm?id=2075416.2075425} | ||
2527 | ,acmid = {2075425} | ||
2528 | ,publisher = {Springer-Verlag} | ||
2529 | ,address = {Berlin, Heidelberg} | ||
2530 | } | ||
2531 | |||
2532 | @phdthesis{JoshTriplettPhD | ||
2533 | ,author="Josh Triplett" | ||
2534 | ,title="Relativistic Causal Ordering: A Memory Model for Scalable Concurrent Data Structures" | ||
2535 | ,school="Portland State University" | ||
2536 | ,year="2012" | ||
2537 | ,annotation={ | ||
2538 | RCU-protected hash tables, barriers vs. read-side traversal order. | ||
2539 | . | ||
2540 | If the updater is making changes in the opposite direction from | ||
2541 | 	the read-side traversal order, the updater need only execute a | ||
2542 | memory-barrier instruction, but if in the same direction, the | ||
2543 | updater needs to wait for a grace period between the individual | ||
2544 | updates. | ||
2545 | } | ||
2443 | } | 2546 | } |
2444 | 2547 | ||
2445 | @article{MathieuDesnoyers2012URCU | 2548 | @article{MathieuDesnoyers2012URCU |
@@ -2459,5 +2562,150 @@ lot of {Linux} into your technology!!!" | |||
2459 | RCU updates, RCU grace-period batching, update overhead, | 2562 | RCU updates, RCU grace-period batching, update overhead, |
2460 | http://www.rdrop.com/users/paulmck/RCU/urcu-main-accepted.2011.08.30a.pdf | 2563 | http://www.rdrop.com/users/paulmck/RCU/urcu-main-accepted.2011.08.30a.pdf |
2461 | http://www.rdrop.com/users/paulmck/RCU/urcu-supp-accepted.2011.08.30a.pdf | 2564 | http://www.rdrop.com/users/paulmck/RCU/urcu-supp-accepted.2011.08.30a.pdf |
2565 | http://www.computer.org/cms/Computer.org/dl/trans/td/2012/02/extras/ttd2012020375s.pdf | ||
2566 | } | ||
2567 | } | ||
2568 | |||
2569 | @inproceedings{AustinClements2012RCULinux:mmapsem | ||
2570 | ,author = {Austin Clements and Frans Kaashoek and Nickolai Zeldovich} | ||
2571 | ,title = {Scalable Address Spaces Using {RCU} Balanced Trees} | ||
2572 | ,booktitle = {Architectural Support for Programming Languages and Operating Systems (ASPLOS 2012)} | ||
2573 | ,month = {March} | ||
2574 | ,year = {2012} | ||
2575 | ,pages = {199--210} | ||
2576 | ,numpages = {12} | ||
2577 | ,publisher = {ACM} | ||
2578 | ,address = {London, UK} | ||
2579 | ,url="http://people.csail.mit.edu/nickolai/papers/clements-bonsai.pdf" | ||
2580 | } | ||
2581 | |||
2582 | @unpublished{PaulEMcKenney2012ELCbattery | ||
2583 | ,Author="Paul E. McKenney" | ||
2584 | ,Title="Making {RCU} Safe For Battery-Powered Devices" | ||
2585 | ,month="February" | ||
2586 | ,day="15" | ||
2587 | ,year="2012" | ||
2588 | ,note="Available: | ||
2589 | \url{http://www.rdrop.com/users/paulmck/RCU/RCUdynticks.2012.02.15b.pdf} | ||
2590 | [Viewed March 1, 2012]" | ||
2591 | ,annotation={ | ||
2592 | RCU_FAST_NO_HZ, round 2. | ||
2593 | } | ||
2594 | } | ||
2595 | |||
2596 | @article{GuillermoVigueras2012RCUCrowd | ||
2597 | ,author = {Vigueras, Guillermo and Ordu\~{n}a, Juan M. and Lozano, Miguel} | ||
2598 | ,day = {25} | ||
2599 | ,doi = {10.1007/s11227-012-0766-x} | ||
2600 | ,issn = {0920-8542} | ||
2601 | ,journal = {The Journal of Supercomputing} | ||
2602 | ,keywords = {linux, simulation} | ||
2603 | ,month = apr | ||
2604 | ,posted-at = {2012-05-03 09:12:04} | ||
2605 | ,priority = {2} | ||
2606 | ,title = {{A Read-Copy Update based parallel server for distributed crowd simulations}} | ||
2607 | ,url = {http://dx.doi.org/10.1007/s11227-012-0766-x} | ||
2608 | ,year = {2012} | ||
2609 | } | ||
2610 | |||
2611 | |||
2612 | @unpublished{JonCorbet2012ACCESS:ONCE | ||
2613 | ,Author="Jon Corbet" | ||
2614 | ,Title="{ACCESS\_ONCE()}" | ||
2615 | ,month="August" | ||
2616 | ,day="1" | ||
2617 | ,year="2012" | ||
2618 | ,note="\url{http://lwn.net/Articles/508991/}" | ||
2619 | ,annotation={ | ||
2620 | A couple of simple specific compiler optimizations that motivate | ||
2621 | ACCESS_ONCE(). | ||
2622 | } | ||
2623 | } | ||
2624 | |||
2625 | @unpublished{AlexeyGotsman2012VerifyGraceExtended | ||
2626 | ,Author="Alexey Gotsman and Noam Rinetzky and Hongseok Yang" | ||
2627 | ,Title="Verifying Highly Concurrent Algorithms with Grace (extended version)" | ||
2628 | ,month="July" | ||
2629 | ,day="10" | ||
2630 | ,year="2012" | ||
2631 | ,note="\url{http://software.imdea.org/~gotsman/papers/recycling-esop13-ext.pdf}" | ||
2632 | ,annotation={ | ||
2633 | Separation-logic formulation of RCU uses. | ||
2634 | } | ||
2635 | } | ||
2636 | |||
2637 | @unpublished{PaulMcKenney2012RCUUsage | ||
2638 | ,Author="Paul E. McKenney and Silas Boyd-Wickizer and Jonathan Walpole" | ||
2639 | ,Title="{RCU} Usage In the Linux Kernel: One Decade Later" | ||
2640 | ,month="September" | ||
2641 | ,day="17" | ||
2642 | ,year="2012" | ||
2643 | ,url={http://rdrop.com/users/paulmck/techreports/survey.2012.09.17a.pdf} | ||
2644 | ,note="Technical report paulmck.2012.09.17" | ||
2645 | ,annotation={ | ||
2646 | 	Usage of RCU within the Linux kernel, surveyed one decade on. | ||
2647 | } | ||
2648 | } | ||
2649 | |||
2650 | @unpublished{JonCorbet2012NOCB | ||
2651 | ,Author="Jon Corbet" | ||
2652 | ,Title="Relocating RCU callbacks" | ||
2653 | ,month="October" | ||
2654 | ,day="31" | ||
2655 | ,year="2012" | ||
2656 | ,note="\url{http://lwn.net/Articles/522262/}" | ||
2657 | ,annotation={ | ||
2658 | Overview of the first variant of no-CBs CPUs for RCU. | ||
2659 | } | ||
2660 | } | ||
2661 | |||
2662 | @phdthesis{JustinSeyster2012PhD | ||
2663 | ,author="Justin Seyster" | ||
2664 | ,title="Runtime Verification of Kernel-Level Concurrency Using Compiler-Based Instrumentation" | ||
2665 | ,school="Stony Brook University" | ||
2666 | ,year="2012" | ||
2667 | ,annotation={ | ||
2668 | Looking for data races, including those involving RCU. | ||
2669 | Proposal: | ||
2670 | http://www.fsl.cs.sunysb.edu/docs/jseyster-proposal/redflag.pdf | ||
2671 | Dissertation: | ||
2672 | http://www.fsl.cs.sunysb.edu/docs/jseyster-dissertation/redflag.pdf | ||
2673 | } | ||
2674 | } | ||
2675 | |||
2676 | @unpublished{PaulEMcKenney2013RCUUsage | ||
2677 | ,Author="Paul E. McKenney and Silas Boyd-Wickizer and Jonathan Walpole" | ||
2678 | ,Title="{RCU} Usage in the {Linux} Kernel: One Decade Later" | ||
2679 | ,month="February" | ||
2680 | ,day="24" | ||
2681 | ,year="2013" | ||
2682 | ,note="\url{http://rdrop.com/users/paulmck/techreports/RCUUsage.2013.02.24a.pdf}" | ||
2683 | ,annotation={ | ||
2684 | Usage of RCU within the Linux kernel. | ||
2685 | } | ||
2686 | } | ||
2687 | |||
2688 | @inproceedings{AlexeyGotsman2013ESOPRCU | ||
2689 | ,author = {Alexey Gotsman and Noam Rinetzky and Hongseok Yang} | ||
2690 | ,title = {Verifying concurrent memory reclamation algorithms with grace} | ||
2691 | ,booktitle = {ESOP'13: European Symposium on Programming} | ||
2692 | ,year = {2013} | ||
2693 | ,pages = {249--269} | ||
2694 | ,publisher = {Springer} | ||
2695 | ,address = {Rome, Italy} | ||
2696 | ,annotation={ | ||
2697 | http://software.imdea.org/~gotsman/papers/recycling-esop13.pdf | ||
2698 | } | ||
2699 | } | ||
2700 | |||
2701 | @unpublished{PaulEMcKenney2013NoTinyPreempt | ||
2702 | ,Author="Paul E. McKenney" | ||
2703 | ,Title="Simplifying RCU" | ||
2704 | ,month="March" | ||
2705 | ,day="6" | ||
2706 | ,year="2013" | ||
2707 | ,note="\url{http://lwn.net/Articles/541037/}" | ||
2708 | ,annotation={ | ||
2709 | Getting rid of TINY_PREEMPT_RCU. | ||
2462 | } | 2710 | } |
2463 | } | 2711 | } |
diff --git a/Documentation/RCU/rcubarrier.txt b/Documentation/RCU/rcubarrier.txt index 2e319d1b9ef2..b10cfe711e68 100644 --- a/Documentation/RCU/rcubarrier.txt +++ b/Documentation/RCU/rcubarrier.txt | |||
@@ -70,10 +70,14 @@ in realtime kernels in order to avoid excessive scheduling latencies. | |||
70 | 70 | ||
71 | rcu_barrier() | 71 | rcu_barrier() |
72 | 72 | ||
73 | We instead need the rcu_barrier() primitive. This primitive is similar | 73 | We instead need the rcu_barrier() primitive. Rather than waiting for |
74 | to synchronize_rcu(), but instead of waiting solely for a grace | 74 | a grace period to elapse, rcu_barrier() waits for all outstanding RCU |
75 | period to elapse, it also waits for all outstanding RCU callbacks to | 75 | callbacks to complete. Please note that rcu_barrier() does -not- imply |
76 | complete. Pseudo-code using rcu_barrier() is as follows: | 76 | synchronize_rcu(); in particular, if there are no RCU callbacks queued |
77 | anywhere, rcu_barrier() is within its rights to return immediately, | ||
78 | without waiting for a grace period to elapse. | ||
79 | |||
80 | Pseudo-code using rcu_barrier() is as follows: | ||
77 | 81 | ||
78 | 1. Prevent any new RCU callbacks from being posted. | 82 | 1. Prevent any new RCU callbacks from being posted. |
79 | 2. Execute rcu_barrier(). | 83 | 2. Execute rcu_barrier(). |
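
As a purely illustrative sketch of the shutdown sequence that the pseudo-code above begins to describe (not part of this patch), a module-exit path could look as follows; my_stop_new_callbacks() is a hypothetical helper standing in for whatever mechanism keeps the module from posting further call_rcu() callbacks:

	#include <linux/module.h>
	#include <linux/rcupdate.h>

	static void my_stop_new_callbacks(void);	/* hypothetical: stop posting call_rcu() callbacks */

	static void __exit my_module_exit(void)
	{
		my_stop_new_callbacks();	/* 1. prevent any new RCU callbacks from being posted */
		rcu_barrier();			/* 2. wait for all previously posted callbacks to finish */
		/* 3. now safe to free callback-referenced data and unload the module */
	}
	module_exit(my_module_exit);
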
diff --git a/Documentation/RCU/torture.txt b/Documentation/RCU/torture.txt index d8a502387397..dac02a6219b1 100644 --- a/Documentation/RCU/torture.txt +++ b/Documentation/RCU/torture.txt | |||
@@ -42,6 +42,16 @@ fqs_holdoff Holdoff time (in microseconds) between consecutive calls | |||
42 | fqs_stutter Wait time (in seconds) between consecutive bursts | 42 | fqs_stutter Wait time (in seconds) between consecutive bursts |
43 | of calls to force_quiescent_state(). | 43 | of calls to force_quiescent_state(). |
44 | 44 | ||
45 | gp_normal Make the fake writers use normal synchronous grace-period | ||
46 | primitives. | ||
47 | |||
48 | gp_exp Make the fake writers use expedited synchronous grace-period | ||
49 | primitives. If both gp_normal and gp_exp are set, or | ||
50 | 		if neither gp_normal nor gp_exp is set, then randomly | ||
51 | choose the primitive so that about 50% are normal and | ||
52 | 		50% expedited. By default, neither is set, which | ||
53 | 		gives the best overall test coverage. | ||
54 | |||
45 | irqreader Says to invoke RCU readers from irq level. This is currently | 55 | irqreader Says to invoke RCU readers from irq level. This is currently |
46 | done via timers. Defaults to "1" for variants of RCU that | 56 | done via timers. Defaults to "1" for variants of RCU that |
47 | permit this. (Or, more accurately, variants of RCU that do | 57 | permit this. (Or, more accurately, variants of RCU that do |
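
The gp_normal/gp_exp description above implies a simple selection rule for the fake writers; the sketch below shows one way such a rule could look (an assumption for illustration, not the actual rcutorture source):

	#include <linux/rcupdate.h>
	#include <linux/random.h>

	static bool gp_normal;	/* in the real module these would be module_param() bools */
	static bool gp_exp;

	static void torture_do_one_gp(void)
	{
		if (gp_normal == gp_exp) {		/* both set, or neither set */
			if (prandom_u32() & 0x1)	/* roughly 50% normal, 50% expedited */
				synchronize_rcu();
			else
				synchronize_rcu_expedited();
		} else if (gp_exp) {
			synchronize_rcu_expedited();
		} else {
			synchronize_rcu();
		}
	}
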
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt index fa5d8a9ae205..c8c42e64e953 100644 --- a/Documentation/memory-barriers.txt +++ b/Documentation/memory-barriers.txt | |||
@@ -531,9 +531,10 @@ dependency barrier to make it work correctly. Consider the following bit of | |||
531 | code: | 531 | code: |
532 | 532 | ||
533 | q = &a; | 533 | q = &a; |
534 | if (p) | 534 | if (p) { |
535 | <data dependency barrier> | ||
535 | q = &b; | 536 | q = &b; |
536 | <data dependency barrier> | 537 | } |
537 | x = *q; | 538 | x = *q; |
538 | 539 | ||
539 | This will not have the desired effect because there is no actual data | 540 | This will not have the desired effect because there is no actual data |
@@ -542,9 +543,10 @@ attempting to predict the outcome in advance. In such a case what's actually | |||
542 | required is: | 543 | required is: |
543 | 544 | ||
544 | q = &a; | 545 | q = &a; |
545 | if (p) | 546 | if (p) { |
547 | <read barrier> | ||
546 | q = &b; | 548 | q = &b; |
547 | <read barrier> | 549 | } |
548 | x = *q; | 550 | x = *q; |
549 | 551 | ||
550 | 552 | ||
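
Mapping the corrected example above onto actual kernel primitives, "<read barrier>" corresponds to smp_rmb(); a minimal sketch, assuming p, a, and b are variables shared with a writer running on another CPU:

	#include <asm/barrier.h>

	static int a, b, p;		/* shared with a writer on another CPU */

	static void reader(void)
	{
		int *q, x;

		q = &a;
		if (p) {
			smp_rmb();	/* order the load of p before the later load through q */
			q = &b;
		}
		x = *q;
		(void)x;
	}
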
diff --git a/Documentation/timers/NO_HZ.txt b/Documentation/timers/NO_HZ.txt index 88697584242b..cca122f25120 100644 --- a/Documentation/timers/NO_HZ.txt +++ b/Documentation/timers/NO_HZ.txt | |||
@@ -24,8 +24,8 @@ There are three main ways of managing scheduling-clock interrupts | |||
24 | workloads, you will normally -not- want this option. | 24 | workloads, you will normally -not- want this option. |
25 | 25 | ||
26 | These three cases are described in the following three sections, followed | 26 | These three cases are described in the following three sections, followed |
27 | by a third section on RCU-specific considerations and a fourth and final | 27 | by a fourth section on RCU-specific considerations, a fifth section |
28 | section listing known issues. | 28 | discussing testing, and a sixth and final section listing known issues. |
29 | 29 | ||
30 | 30 | ||
31 | NEVER OMIT SCHEDULING-CLOCK TICKS | 31 | NEVER OMIT SCHEDULING-CLOCK TICKS |
@@ -121,14 +121,15 @@ boot parameter specifies the adaptive-ticks CPUs. For example, | |||
121 | "nohz_full=1,6-8" says that CPUs 1, 6, 7, and 8 are to be adaptive-ticks | 121 | "nohz_full=1,6-8" says that CPUs 1, 6, 7, and 8 are to be adaptive-ticks |
122 | CPUs. Note that you are prohibited from marking all of the CPUs as | 122 | CPUs. Note that you are prohibited from marking all of the CPUs as |
123 | adaptive-tick CPUs: At least one non-adaptive-tick CPU must remain | 123 | adaptive-tick CPUs: At least one non-adaptive-tick CPU must remain |
124 | online to handle timekeeping tasks in order to ensure that system calls | 124 | online to handle timekeeping tasks in order to ensure that system |
125 | like gettimeofday() returns accurate values on adaptive-tick CPUs. | 125 | calls like gettimeofday() return accurate values on adaptive-tick CPUs. |
126 | (This is not an issue for CONFIG_NO_HZ_IDLE=y because there are no | 126 | (This is not an issue for CONFIG_NO_HZ_IDLE=y because there are no running |
127 | running user processes to observe slight drifts in clock rate.) | 127 | user processes to observe slight drifts in clock rate.) Therefore, the |
128 | Therefore, the boot CPU is prohibited from entering adaptive-ticks | 128 | boot CPU is prohibited from entering adaptive-ticks mode. Specifying a |
129 | mode. Specifying a "nohz_full=" mask that includes the boot CPU will | 129 | "nohz_full=" mask that includes the boot CPU will result in a boot-time |
130 | result in a boot-time error message, and the boot CPU will be removed | 130 | error message, and the boot CPU will be removed from the mask. Note that |
131 | from the mask. | 131 | this means that your system must have at least two CPUs in order for |
132 | CONFIG_NO_HZ_FULL=y to do anything for you. | ||
132 | 133 | ||
133 | Alternatively, the CONFIG_NO_HZ_FULL_ALL=y Kconfig parameter specifies | 134 | Alternatively, the CONFIG_NO_HZ_FULL_ALL=y Kconfig parameter specifies |
134 | that all CPUs other than the boot CPU are adaptive-ticks CPUs. This | 135 | that all CPUs other than the boot CPU are adaptive-ticks CPUs. This |
@@ -232,6 +233,29 @@ scheduler will decide where to run them, which might or might not be | |||
232 | where you want them to run. | 233 | where you want them to run. |
233 | 234 | ||
234 | 235 | ||
236 | TESTING | ||
237 | |||
238 | So you enable all the OS-jitter features described in this document, | ||
239 | but do not see any change in your workload's behavior. Is this because | ||
240 | your workload isn't affected that much by OS jitter, or is it because | ||
241 | something else is in the way? This section helps answer this question | ||
242 | by providing a simple OS-jitter test suite, which is available on branch | ||
243 | master of the following git archive: | ||
244 | |||
245 | git://git.kernel.org/pub/scm/linux/kernel/git/frederic/dynticks-testing.git | ||
246 | |||
247 | Clone this archive and follow the instructions in the README file. | ||
248 | This test procedure will produce a trace that will allow you to evaluate | ||
249 | whether or not you have succeeded in removing OS jitter from your system. | ||
250 | If this trace shows that you have removed OS jitter as much as is | ||
251 | possible, then you can conclude that your workload is not all that | ||
252 | sensitive to OS jitter. | ||
253 | |||
254 | Note: this test requires that your system have at least two CPUs. | ||
255 | We do not currently have a good way to remove OS jitter from single-CPU | ||
256 | systems. | ||
257 | |||
258 | |||
235 | KNOWN ISSUES | 259 | KNOWN ISSUES |
236 | 260 | ||
237 | o Dyntick-idle slows transitions to and from idle slightly. | 261 | o Dyntick-idle slows transitions to and from idle slightly. |
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 69732d279e8b..83e2c31e8b00 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h | |||
@@ -122,8 +122,12 @@ | |||
122 | #define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \ | 122 | #define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \ |
123 | *(__trace_printk_fmt) /* Trace_printk fmt' pointer */ \ | 123 | *(__trace_printk_fmt) /* Trace_printk fmt' pointer */ \ |
124 | VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .; | 124 | VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .; |
125 | #define TRACEPOINT_STR() VMLINUX_SYMBOL(__start___tracepoint_str) = .; \ | ||
126 | *(__tracepoint_str) /* Trace_printk fmt' pointer */ \ | ||
127 | VMLINUX_SYMBOL(__stop___tracepoint_str) = .; | ||
125 | #else | 128 | #else |
126 | #define TRACE_PRINTKS() | 129 | #define TRACE_PRINTKS() |
130 | #define TRACEPOINT_STR() | ||
127 | #endif | 131 | #endif |
128 | 132 | ||
129 | #ifdef CONFIG_FTRACE_SYSCALLS | 133 | #ifdef CONFIG_FTRACE_SYSCALLS |
@@ -190,7 +194,8 @@ | |||
190 | VMLINUX_SYMBOL(__stop___verbose) = .; \ | 194 | VMLINUX_SYMBOL(__stop___verbose) = .; \ |
191 | LIKELY_PROFILE() \ | 195 | LIKELY_PROFILE() \ |
192 | BRANCH_PROFILE() \ | 196 | BRANCH_PROFILE() \ |
193 | TRACE_PRINTKS() | 197 | TRACE_PRINTKS() \ |
198 | TRACEPOINT_STR() | ||
194 | 199 | ||
195 | /* | 200 | /* |
196 | * Data section helpers | 201 | * Data section helpers |
diff --git a/include/linux/debugobjects.h b/include/linux/debugobjects.h index 0e5f5785d9f2..98ffcbd4888e 100644 --- a/include/linux/debugobjects.h +++ b/include/linux/debugobjects.h | |||
@@ -63,7 +63,7 @@ struct debug_obj_descr { | |||
63 | extern void debug_object_init (void *addr, struct debug_obj_descr *descr); | 63 | extern void debug_object_init (void *addr, struct debug_obj_descr *descr); |
64 | extern void | 64 | extern void |
65 | debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr); | 65 | debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr); |
66 | extern void debug_object_activate (void *addr, struct debug_obj_descr *descr); | 66 | extern int debug_object_activate (void *addr, struct debug_obj_descr *descr); |
67 | extern void debug_object_deactivate(void *addr, struct debug_obj_descr *descr); | 67 | extern void debug_object_deactivate(void *addr, struct debug_obj_descr *descr); |
68 | extern void debug_object_destroy (void *addr, struct debug_obj_descr *descr); | 68 | extern void debug_object_destroy (void *addr, struct debug_obj_descr *descr); |
69 | extern void debug_object_free (void *addr, struct debug_obj_descr *descr); | 69 | extern void debug_object_free (void *addr, struct debug_obj_descr *descr); |
@@ -85,8 +85,8 @@ static inline void | |||
85 | debug_object_init (void *addr, struct debug_obj_descr *descr) { } | 85 | debug_object_init (void *addr, struct debug_obj_descr *descr) { } |
86 | static inline void | 86 | static inline void |
87 | debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr) { } | 87 | debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr) { } |
88 | static inline void | 88 | static inline int |
89 | debug_object_activate (void *addr, struct debug_obj_descr *descr) { } | 89 | debug_object_activate (void *addr, struct debug_obj_descr *descr) { return 0; } |
90 | static inline void | 90 | static inline void |
91 | debug_object_deactivate(void *addr, struct debug_obj_descr *descr) { } | 91 | debug_object_deactivate(void *addr, struct debug_obj_descr *descr) { } |
92 | static inline void | 92 | static inline void |
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 120d57a1c3a5..5eaa746735ff 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h | |||
@@ -359,6 +359,40 @@ do { \ | |||
359 | __trace_printk(ip, fmt, ##args); \ | 359 | __trace_printk(ip, fmt, ##args); \ |
360 | } while (0) | 360 | } while (0) |
361 | 361 | ||
362 | /** | ||
363 | * tracepoint_string - register constant persistent string to trace system | ||
364 | * @str - a constant persistent string that will be referenced in tracepoints | ||
365 | * | ||
366 | * If constant strings are being used in tracepoints, it is faster and | ||
367 | * more efficient to just save the pointer to the string and reference | ||
368 | * that with a printf "%s" instead of saving the string in the ring buffer | ||
369 | * and wasting space and time. | ||
370 | * | ||
371 | * The problem with the above approach is that userspace tools that read | ||
372 | * the binary output of the trace buffers do not have access to the string. | ||
373 | * Instead they just show the address of the string which is not very | ||
374 | * useful to users. | ||
375 | * | ||
376 | * With tracepoint_string(), the string will be registered to the tracing | ||
377 | * system and exported to userspace via the debugfs/tracing/printk_formats | ||
378 | * file that maps the string address to the string text. This way userspace | ||
379 | * tools that read the binary buffers have a way to map the pointers to | ||
380 | * the ASCII strings they represent. | ||
381 | * | ||
382 | * The @str used must be a constant string and persistent as it would not | ||
383 | * make sense to show a string that no longer exists. But it is still fine | ||
384 | * to be used with modules, because when modules are unloaded, if they | ||
385 | * had tracepoints, the ring buffers are cleared too. As long as the string | ||
386 | * does not change during the life of the module, it is fine to use | ||
387 | * tracepoint_string() within a module. | ||
388 | */ | ||
389 | #define tracepoint_string(str) \ | ||
390 | ({ \ | ||
391 | static const char *___tp_str __tracepoint_string = str; \ | ||
392 | ___tp_str; \ | ||
393 | }) | ||
394 | #define __tracepoint_string __attribute__((section("__tracepoint_str"))) | ||
395 | |||
362 | #ifdef CONFIG_PERF_EVENTS | 396 | #ifdef CONFIG_PERF_EVENTS |
363 | struct perf_event; | 397 | struct perf_event; |
364 | 398 | ||
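
A minimal sketch of the intended call pattern for the new macro, mirroring how the RCU core uses it elsewhere in this series; the event trace_rcu_utilization() is the one from include/trace/events/rcu.h, and a real caller needs those trace events available in its build.

#include <linux/ftrace_event.h>
#include <trace/events/rcu.h>

static void example_mark_section(void)
{
	/*
	 * The literal is emitted once into the __tracepoint_str section;
	 * only its address goes into the ring buffer, and
	 * debugfs/tracing/printk_formats maps that address back to text.
	 */
	trace_rcu_utilization(tracepoint_string("Start example section"));
	/* ...do the work being marked... */
	trace_rcu_utilization(tracepoint_string("End example section"));
}
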
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h index 97ba4e78a37e..d235e88cfd7c 100644 --- a/include/linux/jiffies.h +++ b/include/linux/jiffies.h | |||
@@ -101,13 +101,13 @@ static inline u64 get_jiffies_64(void) | |||
101 | #define time_after(a,b) \ | 101 | #define time_after(a,b) \ |
102 | (typecheck(unsigned long, a) && \ | 102 | (typecheck(unsigned long, a) && \ |
103 | typecheck(unsigned long, b) && \ | 103 | typecheck(unsigned long, b) && \ |
104 | ((long)(b) - (long)(a) < 0)) | 104 | ((long)((b) - (a)) < 0)) |
105 | #define time_before(a,b) time_after(b,a) | 105 | #define time_before(a,b) time_after(b,a) |
106 | 106 | ||
107 | #define time_after_eq(a,b) \ | 107 | #define time_after_eq(a,b) \ |
108 | (typecheck(unsigned long, a) && \ | 108 | (typecheck(unsigned long, a) && \ |
109 | typecheck(unsigned long, b) && \ | 109 | typecheck(unsigned long, b) && \ |
110 | ((long)(a) - (long)(b) >= 0)) | 110 | ((long)((a) - (b)) >= 0)) |
111 | #define time_before_eq(a,b) time_after_eq(b,a) | 111 | #define time_before_eq(a,b) time_after_eq(b,a) |
112 | 112 | ||
113 | /* | 113 | /* |
@@ -130,13 +130,13 @@ static inline u64 get_jiffies_64(void) | |||
130 | #define time_after64(a,b) \ | 130 | #define time_after64(a,b) \ |
131 | (typecheck(__u64, a) && \ | 131 | (typecheck(__u64, a) && \ |
132 | typecheck(__u64, b) && \ | 132 | typecheck(__u64, b) && \ |
133 | ((__s64)(b) - (__s64)(a) < 0)) | 133 | ((__s64)((b) - (a)) < 0)) |
134 | #define time_before64(a,b) time_after64(b,a) | 134 | #define time_before64(a,b) time_after64(b,a) |
135 | 135 | ||
136 | #define time_after_eq64(a,b) \ | 136 | #define time_after_eq64(a,b) \ |
137 | (typecheck(__u64, a) && \ | 137 | (typecheck(__u64, a) && \ |
138 | typecheck(__u64, b) && \ | 138 | typecheck(__u64, b) && \ |
139 | ((__s64)(a) - (__s64)(b) >= 0)) | 139 | ((__s64)((a) - (b)) >= 0)) |
140 | #define time_before_eq64(a,b) time_after_eq64(b,a) | 140 | #define time_before_eq64(a,b) time_after_eq64(b,a) |
141 | 141 | ||
142 | #define time_in_range64(a, b, c) \ | 142 | #define time_in_range64(a, b, c) \ |
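
The point of the jiffies.h change is that the subtraction now happens in unsigned arithmetic, where wraparound is well defined, and only the result is reinterpreted as signed; casting each operand first, as the old form did, lets the signed subtraction itself overflow, which is undefined behaviour. A stand-alone sketch of the wraparound behaviour, using a simplified macro without the kernel's typecheck():

#include <stdio.h>

#define time_after(a, b)	((long)((b) - (a)) < 0)	/* new form */

int main(void)
{
	unsigned long before = (unsigned long)-16;	/* counter just before wrap */
	unsigned long after  = 16;			/* counter just after wrap  */

	/* Both comparisons read correctly across the wrap point. */
	printf("time_after(after, before) = %d\n", time_after(after, before));	/* 1 */
	printf("time_after(before, after) = %d\n", time_after(before, after));	/* 0 */
	return 0;
}
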
diff --git a/include/linux/rculist.h b/include/linux/rculist.h index f4b1001a4676..4106721c4e5e 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h | |||
@@ -267,8 +267,9 @@ static inline void list_splice_init_rcu(struct list_head *list, | |||
267 | */ | 267 | */ |
268 | #define list_first_or_null_rcu(ptr, type, member) \ | 268 | #define list_first_or_null_rcu(ptr, type, member) \ |
269 | ({struct list_head *__ptr = (ptr); \ | 269 | ({struct list_head *__ptr = (ptr); \ |
270 | struct list_head __rcu *__next = list_next_rcu(__ptr); \ | 270 | struct list_head *__next = ACCESS_ONCE(__ptr->next); \ |
271 | likely(__ptr != __next) ? container_of(__next, type, member) : NULL; \ | 271 | likely(__ptr != __next) ? \ |
272 | list_entry_rcu(__next, type, member) : NULL; \ | ||
272 | }) | 273 | }) |
273 | 274 | ||
274 | /** | 275 | /** |
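
A hypothetical caller (the struct and list names below are illustrative, not from the patch) showing the intent of the rculist.h change: the first entry is fetched exactly once via ACCESS_ONCE(), so the macro yields either a valid entry or NULL even while the list is being modified concurrently.

#include <linux/rculist.h>
#include <linux/rcupdate.h>

struct demo_item {
	int value;
	struct list_head node;
};

static LIST_HEAD(demo_list);

static int demo_read_first_value(void)
{
	struct demo_item *item;
	int ret = -1;

	rcu_read_lock();
	item = list_first_or_null_rcu(&demo_list, struct demo_item, node);
	if (item)
		ret = item->value;
	rcu_read_unlock();
	return ret;
}
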
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 4b14bdc911d7..f1f1bc39346b 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h | |||
@@ -52,7 +52,7 @@ extern int rcutorture_runnable; /* for sysctl */ | |||
52 | #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) | 52 | #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) |
53 | extern void rcutorture_record_test_transition(void); | 53 | extern void rcutorture_record_test_transition(void); |
54 | extern void rcutorture_record_progress(unsigned long vernum); | 54 | extern void rcutorture_record_progress(unsigned long vernum); |
55 | extern void do_trace_rcu_torture_read(char *rcutorturename, | 55 | extern void do_trace_rcu_torture_read(const char *rcutorturename, |
56 | struct rcu_head *rhp, | 56 | struct rcu_head *rhp, |
57 | unsigned long secs, | 57 | unsigned long secs, |
58 | unsigned long c_old, | 58 | unsigned long c_old, |
@@ -65,7 +65,7 @@ static inline void rcutorture_record_progress(unsigned long vernum) | |||
65 | { | 65 | { |
66 | } | 66 | } |
67 | #ifdef CONFIG_RCU_TRACE | 67 | #ifdef CONFIG_RCU_TRACE |
68 | extern void do_trace_rcu_torture_read(char *rcutorturename, | 68 | extern void do_trace_rcu_torture_read(const char *rcutorturename, |
69 | struct rcu_head *rhp, | 69 | struct rcu_head *rhp, |
70 | unsigned long secs, | 70 | unsigned long secs, |
71 | unsigned long c_old, | 71 | unsigned long c_old, |
@@ -229,13 +229,9 @@ extern void rcu_irq_exit(void); | |||
229 | #ifdef CONFIG_RCU_USER_QS | 229 | #ifdef CONFIG_RCU_USER_QS |
230 | extern void rcu_user_enter(void); | 230 | extern void rcu_user_enter(void); |
231 | extern void rcu_user_exit(void); | 231 | extern void rcu_user_exit(void); |
232 | extern void rcu_user_enter_after_irq(void); | ||
233 | extern void rcu_user_exit_after_irq(void); | ||
234 | #else | 232 | #else |
235 | static inline void rcu_user_enter(void) { } | 233 | static inline void rcu_user_enter(void) { } |
236 | static inline void rcu_user_exit(void) { } | 234 | static inline void rcu_user_exit(void) { } |
237 | static inline void rcu_user_enter_after_irq(void) { } | ||
238 | static inline void rcu_user_exit_after_irq(void) { } | ||
239 | static inline void rcu_user_hooks_switch(struct task_struct *prev, | 235 | static inline void rcu_user_hooks_switch(struct task_struct *prev, |
240 | struct task_struct *next) { } | 236 | struct task_struct *next) { } |
241 | #endif /* CONFIG_RCU_USER_QS */ | 237 | #endif /* CONFIG_RCU_USER_QS */ |
@@ -1015,4 +1011,22 @@ static inline bool rcu_is_nocb_cpu(int cpu) { return false; } | |||
1015 | #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */ | 1011 | #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */ |
1016 | 1012 | ||
1017 | 1013 | ||
1014 | /* Only for use by adaptive-ticks code. */ | ||
1015 | #ifdef CONFIG_NO_HZ_FULL_SYSIDLE | ||
1016 | extern bool rcu_sys_is_idle(void); | ||
1017 | extern void rcu_sysidle_force_exit(void); | ||
1018 | #else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */ | ||
1019 | |||
1020 | static inline bool rcu_sys_is_idle(void) | ||
1021 | { | ||
1022 | return false; | ||
1023 | } | ||
1024 | |||
1025 | static inline void rcu_sysidle_force_exit(void) | ||
1026 | { | ||
1027 | } | ||
1028 | |||
1029 | #endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */ | ||
1030 | |||
1031 | |||
1018 | #endif /* __LINUX_RCUPDATE_H */ | 1032 | #endif /* __LINUX_RCUPDATE_H */ |
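
A sketch of the intended call pattern for the new CONFIG_NO_HZ_FULL_SYSIDLE hooks (an assumption about the eventual caller, which is not part of this hunk): the timekeeping CPU asks RCU whether every other CPU is idle before stopping its own tick, and forces an exit from the system-wide-idle state when the tick is needed again.

#include <linux/rcupdate.h>

static bool timekeeper_may_stop_tick(void)
{
	/* False unless CONFIG_NO_HZ_FULL_SYSIDLE=y and all non-timekeeping
	 * CPUs are idle from RCU's point of view. */
	return rcu_sys_is_idle();
}

static void timekeeper_resume_tick(void)
{
	rcu_sysidle_force_exit();
	/* ...restart the local tick here... */
}
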
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h index 59ebcc89f148..ee2376cfaab3 100644 --- a/include/trace/events/rcu.h +++ b/include/trace/events/rcu.h | |||
@@ -19,12 +19,12 @@ | |||
19 | */ | 19 | */ |
20 | TRACE_EVENT(rcu_utilization, | 20 | TRACE_EVENT(rcu_utilization, |
21 | 21 | ||
22 | TP_PROTO(char *s), | 22 | TP_PROTO(const char *s), |
23 | 23 | ||
24 | TP_ARGS(s), | 24 | TP_ARGS(s), |
25 | 25 | ||
26 | TP_STRUCT__entry( | 26 | TP_STRUCT__entry( |
27 | __field(char *, s) | 27 | __field(const char *, s) |
28 | ), | 28 | ), |
29 | 29 | ||
30 | TP_fast_assign( | 30 | TP_fast_assign( |
@@ -51,14 +51,14 @@ TRACE_EVENT(rcu_utilization, | |||
51 | */ | 51 | */ |
52 | TRACE_EVENT(rcu_grace_period, | 52 | TRACE_EVENT(rcu_grace_period, |
53 | 53 | ||
54 | TP_PROTO(char *rcuname, unsigned long gpnum, char *gpevent), | 54 | TP_PROTO(const char *rcuname, unsigned long gpnum, const char *gpevent), |
55 | 55 | ||
56 | TP_ARGS(rcuname, gpnum, gpevent), | 56 | TP_ARGS(rcuname, gpnum, gpevent), |
57 | 57 | ||
58 | TP_STRUCT__entry( | 58 | TP_STRUCT__entry( |
59 | __field(char *, rcuname) | 59 | __field(const char *, rcuname) |
60 | __field(unsigned long, gpnum) | 60 | __field(unsigned long, gpnum) |
61 | __field(char *, gpevent) | 61 | __field(const char *, gpevent) |
62 | ), | 62 | ), |
63 | 63 | ||
64 | TP_fast_assign( | 64 | TP_fast_assign( |
@@ -89,21 +89,21 @@ TRACE_EVENT(rcu_grace_period, | |||
89 | */ | 89 | */ |
90 | TRACE_EVENT(rcu_future_grace_period, | 90 | TRACE_EVENT(rcu_future_grace_period, |
91 | 91 | ||
92 | TP_PROTO(char *rcuname, unsigned long gpnum, unsigned long completed, | 92 | TP_PROTO(const char *rcuname, unsigned long gpnum, unsigned long completed, |
93 | unsigned long c, u8 level, int grplo, int grphi, | 93 | unsigned long c, u8 level, int grplo, int grphi, |
94 | char *gpevent), | 94 | const char *gpevent), |
95 | 95 | ||
96 | TP_ARGS(rcuname, gpnum, completed, c, level, grplo, grphi, gpevent), | 96 | TP_ARGS(rcuname, gpnum, completed, c, level, grplo, grphi, gpevent), |
97 | 97 | ||
98 | TP_STRUCT__entry( | 98 | TP_STRUCT__entry( |
99 | __field(char *, rcuname) | 99 | __field(const char *, rcuname) |
100 | __field(unsigned long, gpnum) | 100 | __field(unsigned long, gpnum) |
101 | __field(unsigned long, completed) | 101 | __field(unsigned long, completed) |
102 | __field(unsigned long, c) | 102 | __field(unsigned long, c) |
103 | __field(u8, level) | 103 | __field(u8, level) |
104 | __field(int, grplo) | 104 | __field(int, grplo) |
105 | __field(int, grphi) | 105 | __field(int, grphi) |
106 | __field(char *, gpevent) | 106 | __field(const char *, gpevent) |
107 | ), | 107 | ), |
108 | 108 | ||
109 | TP_fast_assign( | 109 | TP_fast_assign( |
@@ -132,13 +132,13 @@ TRACE_EVENT(rcu_future_grace_period, | |||
132 | */ | 132 | */ |
133 | TRACE_EVENT(rcu_grace_period_init, | 133 | TRACE_EVENT(rcu_grace_period_init, |
134 | 134 | ||
135 | TP_PROTO(char *rcuname, unsigned long gpnum, u8 level, | 135 | TP_PROTO(const char *rcuname, unsigned long gpnum, u8 level, |
136 | int grplo, int grphi, unsigned long qsmask), | 136 | int grplo, int grphi, unsigned long qsmask), |
137 | 137 | ||
138 | TP_ARGS(rcuname, gpnum, level, grplo, grphi, qsmask), | 138 | TP_ARGS(rcuname, gpnum, level, grplo, grphi, qsmask), |
139 | 139 | ||
140 | TP_STRUCT__entry( | 140 | TP_STRUCT__entry( |
141 | __field(char *, rcuname) | 141 | __field(const char *, rcuname) |
142 | __field(unsigned long, gpnum) | 142 | __field(unsigned long, gpnum) |
143 | __field(u8, level) | 143 | __field(u8, level) |
144 | __field(int, grplo) | 144 | __field(int, grplo) |
@@ -168,12 +168,12 @@ TRACE_EVENT(rcu_grace_period_init, | |||
168 | */ | 168 | */ |
169 | TRACE_EVENT(rcu_preempt_task, | 169 | TRACE_EVENT(rcu_preempt_task, |
170 | 170 | ||
171 | TP_PROTO(char *rcuname, int pid, unsigned long gpnum), | 171 | TP_PROTO(const char *rcuname, int pid, unsigned long gpnum), |
172 | 172 | ||
173 | TP_ARGS(rcuname, pid, gpnum), | 173 | TP_ARGS(rcuname, pid, gpnum), |
174 | 174 | ||
175 | TP_STRUCT__entry( | 175 | TP_STRUCT__entry( |
176 | __field(char *, rcuname) | 176 | __field(const char *, rcuname) |
177 | __field(unsigned long, gpnum) | 177 | __field(unsigned long, gpnum) |
178 | __field(int, pid) | 178 | __field(int, pid) |
179 | ), | 179 | ), |
@@ -195,12 +195,12 @@ TRACE_EVENT(rcu_preempt_task, | |||
195 | */ | 195 | */ |
196 | TRACE_EVENT(rcu_unlock_preempted_task, | 196 | TRACE_EVENT(rcu_unlock_preempted_task, |
197 | 197 | ||
198 | TP_PROTO(char *rcuname, unsigned long gpnum, int pid), | 198 | TP_PROTO(const char *rcuname, unsigned long gpnum, int pid), |
199 | 199 | ||
200 | TP_ARGS(rcuname, gpnum, pid), | 200 | TP_ARGS(rcuname, gpnum, pid), |
201 | 201 | ||
202 | TP_STRUCT__entry( | 202 | TP_STRUCT__entry( |
203 | __field(char *, rcuname) | 203 | __field(const char *, rcuname) |
204 | __field(unsigned long, gpnum) | 204 | __field(unsigned long, gpnum) |
205 | __field(int, pid) | 205 | __field(int, pid) |
206 | ), | 206 | ), |
@@ -224,14 +224,14 @@ TRACE_EVENT(rcu_unlock_preempted_task, | |||
224 | */ | 224 | */ |
225 | TRACE_EVENT(rcu_quiescent_state_report, | 225 | TRACE_EVENT(rcu_quiescent_state_report, |
226 | 226 | ||
227 | TP_PROTO(char *rcuname, unsigned long gpnum, | 227 | TP_PROTO(const char *rcuname, unsigned long gpnum, |
228 | unsigned long mask, unsigned long qsmask, | 228 | unsigned long mask, unsigned long qsmask, |
229 | u8 level, int grplo, int grphi, int gp_tasks), | 229 | u8 level, int grplo, int grphi, int gp_tasks), |
230 | 230 | ||
231 | TP_ARGS(rcuname, gpnum, mask, qsmask, level, grplo, grphi, gp_tasks), | 231 | TP_ARGS(rcuname, gpnum, mask, qsmask, level, grplo, grphi, gp_tasks), |
232 | 232 | ||
233 | TP_STRUCT__entry( | 233 | TP_STRUCT__entry( |
234 | __field(char *, rcuname) | 234 | __field(const char *, rcuname) |
235 | __field(unsigned long, gpnum) | 235 | __field(unsigned long, gpnum) |
236 | __field(unsigned long, mask) | 236 | __field(unsigned long, mask) |
237 | __field(unsigned long, qsmask) | 237 | __field(unsigned long, qsmask) |
@@ -268,15 +268,15 @@ TRACE_EVENT(rcu_quiescent_state_report, | |||
268 | */ | 268 | */ |
269 | TRACE_EVENT(rcu_fqs, | 269 | TRACE_EVENT(rcu_fqs, |
270 | 270 | ||
271 | TP_PROTO(char *rcuname, unsigned long gpnum, int cpu, char *qsevent), | 271 | TP_PROTO(const char *rcuname, unsigned long gpnum, int cpu, const char *qsevent), |
272 | 272 | ||
273 | TP_ARGS(rcuname, gpnum, cpu, qsevent), | 273 | TP_ARGS(rcuname, gpnum, cpu, qsevent), |
274 | 274 | ||
275 | TP_STRUCT__entry( | 275 | TP_STRUCT__entry( |
276 | __field(char *, rcuname) | 276 | __field(const char *, rcuname) |
277 | __field(unsigned long, gpnum) | 277 | __field(unsigned long, gpnum) |
278 | __field(int, cpu) | 278 | __field(int, cpu) |
279 | __field(char *, qsevent) | 279 | __field(const char *, qsevent) |
280 | ), | 280 | ), |
281 | 281 | ||
282 | TP_fast_assign( | 282 | TP_fast_assign( |
@@ -308,12 +308,12 @@ TRACE_EVENT(rcu_fqs, | |||
308 | */ | 308 | */ |
309 | TRACE_EVENT(rcu_dyntick, | 309 | TRACE_EVENT(rcu_dyntick, |
310 | 310 | ||
311 | TP_PROTO(char *polarity, long long oldnesting, long long newnesting), | 311 | TP_PROTO(const char *polarity, long long oldnesting, long long newnesting), |
312 | 312 | ||
313 | TP_ARGS(polarity, oldnesting, newnesting), | 313 | TP_ARGS(polarity, oldnesting, newnesting), |
314 | 314 | ||
315 | TP_STRUCT__entry( | 315 | TP_STRUCT__entry( |
316 | __field(char *, polarity) | 316 | __field(const char *, polarity) |
317 | __field(long long, oldnesting) | 317 | __field(long long, oldnesting) |
318 | __field(long long, newnesting) | 318 | __field(long long, newnesting) |
319 | ), | 319 | ), |
@@ -352,12 +352,12 @@ TRACE_EVENT(rcu_dyntick, | |||
352 | */ | 352 | */ |
353 | TRACE_EVENT(rcu_prep_idle, | 353 | TRACE_EVENT(rcu_prep_idle, |
354 | 354 | ||
355 | TP_PROTO(char *reason), | 355 | TP_PROTO(const char *reason), |
356 | 356 | ||
357 | TP_ARGS(reason), | 357 | TP_ARGS(reason), |
358 | 358 | ||
359 | TP_STRUCT__entry( | 359 | TP_STRUCT__entry( |
360 | __field(char *, reason) | 360 | __field(const char *, reason) |
361 | ), | 361 | ), |
362 | 362 | ||
363 | TP_fast_assign( | 363 | TP_fast_assign( |
@@ -376,13 +376,13 @@ TRACE_EVENT(rcu_prep_idle, | |||
376 | */ | 376 | */ |
377 | TRACE_EVENT(rcu_callback, | 377 | TRACE_EVENT(rcu_callback, |
378 | 378 | ||
379 | TP_PROTO(char *rcuname, struct rcu_head *rhp, long qlen_lazy, | 379 | TP_PROTO(const char *rcuname, struct rcu_head *rhp, long qlen_lazy, |
380 | long qlen), | 380 | long qlen), |
381 | 381 | ||
382 | TP_ARGS(rcuname, rhp, qlen_lazy, qlen), | 382 | TP_ARGS(rcuname, rhp, qlen_lazy, qlen), |
383 | 383 | ||
384 | TP_STRUCT__entry( | 384 | TP_STRUCT__entry( |
385 | __field(char *, rcuname) | 385 | __field(const char *, rcuname) |
386 | __field(void *, rhp) | 386 | __field(void *, rhp) |
387 | __field(void *, func) | 387 | __field(void *, func) |
388 | __field(long, qlen_lazy) | 388 | __field(long, qlen_lazy) |
@@ -412,13 +412,13 @@ TRACE_EVENT(rcu_callback, | |||
412 | */ | 412 | */ |
413 | TRACE_EVENT(rcu_kfree_callback, | 413 | TRACE_EVENT(rcu_kfree_callback, |
414 | 414 | ||
415 | TP_PROTO(char *rcuname, struct rcu_head *rhp, unsigned long offset, | 415 | TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset, |
416 | long qlen_lazy, long qlen), | 416 | long qlen_lazy, long qlen), |
417 | 417 | ||
418 | TP_ARGS(rcuname, rhp, offset, qlen_lazy, qlen), | 418 | TP_ARGS(rcuname, rhp, offset, qlen_lazy, qlen), |
419 | 419 | ||
420 | TP_STRUCT__entry( | 420 | TP_STRUCT__entry( |
421 | __field(char *, rcuname) | 421 | __field(const char *, rcuname) |
422 | __field(void *, rhp) | 422 | __field(void *, rhp) |
423 | __field(unsigned long, offset) | 423 | __field(unsigned long, offset) |
424 | __field(long, qlen_lazy) | 424 | __field(long, qlen_lazy) |
@@ -447,12 +447,12 @@ TRACE_EVENT(rcu_kfree_callback, | |||
447 | */ | 447 | */ |
448 | TRACE_EVENT(rcu_batch_start, | 448 | TRACE_EVENT(rcu_batch_start, |
449 | 449 | ||
450 | TP_PROTO(char *rcuname, long qlen_lazy, long qlen, long blimit), | 450 | TP_PROTO(const char *rcuname, long qlen_lazy, long qlen, long blimit), |
451 | 451 | ||
452 | TP_ARGS(rcuname, qlen_lazy, qlen, blimit), | 452 | TP_ARGS(rcuname, qlen_lazy, qlen, blimit), |
453 | 453 | ||
454 | TP_STRUCT__entry( | 454 | TP_STRUCT__entry( |
455 | __field(char *, rcuname) | 455 | __field(const char *, rcuname) |
456 | __field(long, qlen_lazy) | 456 | __field(long, qlen_lazy) |
457 | __field(long, qlen) | 457 | __field(long, qlen) |
458 | __field(long, blimit) | 458 | __field(long, blimit) |
@@ -477,12 +477,12 @@ TRACE_EVENT(rcu_batch_start, | |||
477 | */ | 477 | */ |
478 | TRACE_EVENT(rcu_invoke_callback, | 478 | TRACE_EVENT(rcu_invoke_callback, |
479 | 479 | ||
480 | TP_PROTO(char *rcuname, struct rcu_head *rhp), | 480 | TP_PROTO(const char *rcuname, struct rcu_head *rhp), |
481 | 481 | ||
482 | TP_ARGS(rcuname, rhp), | 482 | TP_ARGS(rcuname, rhp), |
483 | 483 | ||
484 | TP_STRUCT__entry( | 484 | TP_STRUCT__entry( |
485 | __field(char *, rcuname) | 485 | __field(const char *, rcuname) |
486 | __field(void *, rhp) | 486 | __field(void *, rhp) |
487 | __field(void *, func) | 487 | __field(void *, func) |
488 | ), | 488 | ), |
@@ -506,12 +506,12 @@ TRACE_EVENT(rcu_invoke_callback, | |||
506 | */ | 506 | */ |
507 | TRACE_EVENT(rcu_invoke_kfree_callback, | 507 | TRACE_EVENT(rcu_invoke_kfree_callback, |
508 | 508 | ||
509 | TP_PROTO(char *rcuname, struct rcu_head *rhp, unsigned long offset), | 509 | TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset), |
510 | 510 | ||
511 | TP_ARGS(rcuname, rhp, offset), | 511 | TP_ARGS(rcuname, rhp, offset), |
512 | 512 | ||
513 | TP_STRUCT__entry( | 513 | TP_STRUCT__entry( |
514 | __field(char *, rcuname) | 514 | __field(const char *, rcuname) |
515 | __field(void *, rhp) | 515 | __field(void *, rhp) |
516 | __field(unsigned long, offset) | 516 | __field(unsigned long, offset) |
517 | ), | 517 | ), |
@@ -539,13 +539,13 @@ TRACE_EVENT(rcu_invoke_kfree_callback, | |||
539 | */ | 539 | */ |
540 | TRACE_EVENT(rcu_batch_end, | 540 | TRACE_EVENT(rcu_batch_end, |
541 | 541 | ||
542 | TP_PROTO(char *rcuname, int callbacks_invoked, | 542 | TP_PROTO(const char *rcuname, int callbacks_invoked, |
543 | bool cb, bool nr, bool iit, bool risk), | 543 | bool cb, bool nr, bool iit, bool risk), |
544 | 544 | ||
545 | TP_ARGS(rcuname, callbacks_invoked, cb, nr, iit, risk), | 545 | TP_ARGS(rcuname, callbacks_invoked, cb, nr, iit, risk), |
546 | 546 | ||
547 | TP_STRUCT__entry( | 547 | TP_STRUCT__entry( |
548 | __field(char *, rcuname) | 548 | __field(const char *, rcuname) |
549 | __field(int, callbacks_invoked) | 549 | __field(int, callbacks_invoked) |
550 | __field(bool, cb) | 550 | __field(bool, cb) |
551 | __field(bool, nr) | 551 | __field(bool, nr) |
@@ -577,13 +577,13 @@ TRACE_EVENT(rcu_batch_end, | |||
577 | */ | 577 | */ |
578 | TRACE_EVENT(rcu_torture_read, | 578 | TRACE_EVENT(rcu_torture_read, |
579 | 579 | ||
580 | TP_PROTO(char *rcutorturename, struct rcu_head *rhp, | 580 | TP_PROTO(const char *rcutorturename, struct rcu_head *rhp, |
581 | unsigned long secs, unsigned long c_old, unsigned long c), | 581 | unsigned long secs, unsigned long c_old, unsigned long c), |
582 | 582 | ||
583 | TP_ARGS(rcutorturename, rhp, secs, c_old, c), | 583 | TP_ARGS(rcutorturename, rhp, secs, c_old, c), |
584 | 584 | ||
585 | TP_STRUCT__entry( | 585 | TP_STRUCT__entry( |
586 | __field(char *, rcutorturename) | 586 | __field(const char *, rcutorturename) |
587 | __field(struct rcu_head *, rhp) | 587 | __field(struct rcu_head *, rhp) |
588 | __field(unsigned long, secs) | 588 | __field(unsigned long, secs) |
589 | __field(unsigned long, c_old) | 589 | __field(unsigned long, c_old) |
@@ -623,13 +623,13 @@ TRACE_EVENT(rcu_torture_read, | |||
623 | */ | 623 | */ |
624 | TRACE_EVENT(rcu_barrier, | 624 | TRACE_EVENT(rcu_barrier, |
625 | 625 | ||
626 | TP_PROTO(char *rcuname, char *s, int cpu, int cnt, unsigned long done), | 626 | TP_PROTO(const char *rcuname, const char *s, int cpu, int cnt, unsigned long done), |
627 | 627 | ||
628 | TP_ARGS(rcuname, s, cpu, cnt, done), | 628 | TP_ARGS(rcuname, s, cpu, cnt, done), |
629 | 629 | ||
630 | TP_STRUCT__entry( | 630 | TP_STRUCT__entry( |
631 | __field(char *, rcuname) | 631 | __field(const char *, rcuname) |
632 | __field(char *, s) | 632 | __field(const char *, s) |
633 | __field(int, cpu) | 633 | __field(int, cpu) |
634 | __field(int, cnt) | 634 | __field(int, cnt) |
635 | __field(unsigned long, done) | 635 | __field(unsigned long, done) |
diff --git a/init/Kconfig b/init/Kconfig index fed81b576f29..cc917d3ec858 100644 --- a/init/Kconfig +++ b/init/Kconfig | |||
@@ -470,6 +470,7 @@ config TREE_RCU | |||
470 | config TREE_PREEMPT_RCU | 470 | config TREE_PREEMPT_RCU |
471 | bool "Preemptible tree-based hierarchical RCU" | 471 | bool "Preemptible tree-based hierarchical RCU" |
472 | depends on PREEMPT | 472 | depends on PREEMPT |
473 | select IRQ_WORK | ||
473 | help | 474 | help |
474 | This option selects the RCU implementation that is | 475 | This option selects the RCU implementation that is |
475 | designed for very large SMP systems with hundreds or | 476 | designed for very large SMP systems with hundreds or |
diff --git a/kernel/rcu.h b/kernel/rcu.h index 7f8e7590e3e5..77131966c4ad 100644 --- a/kernel/rcu.h +++ b/kernel/rcu.h | |||
@@ -67,12 +67,15 @@ | |||
67 | 67 | ||
68 | extern struct debug_obj_descr rcuhead_debug_descr; | 68 | extern struct debug_obj_descr rcuhead_debug_descr; |
69 | 69 | ||
70 | static inline void debug_rcu_head_queue(struct rcu_head *head) | 70 | static inline int debug_rcu_head_queue(struct rcu_head *head) |
71 | { | 71 | { |
72 | debug_object_activate(head, &rcuhead_debug_descr); | 72 | int r1; |
73 | |||
74 | r1 = debug_object_activate(head, &rcuhead_debug_descr); | ||
73 | debug_object_active_state(head, &rcuhead_debug_descr, | 75 | debug_object_active_state(head, &rcuhead_debug_descr, |
74 | STATE_RCU_HEAD_READY, | 76 | STATE_RCU_HEAD_READY, |
75 | STATE_RCU_HEAD_QUEUED); | 77 | STATE_RCU_HEAD_QUEUED); |
78 | return r1; | ||
76 | } | 79 | } |
77 | 80 | ||
78 | static inline void debug_rcu_head_unqueue(struct rcu_head *head) | 81 | static inline void debug_rcu_head_unqueue(struct rcu_head *head) |
@@ -83,8 +86,9 @@ static inline void debug_rcu_head_unqueue(struct rcu_head *head) | |||
83 | debug_object_deactivate(head, &rcuhead_debug_descr); | 86 | debug_object_deactivate(head, &rcuhead_debug_descr); |
84 | } | 87 | } |
85 | #else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ | 88 | #else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ |
86 | static inline void debug_rcu_head_queue(struct rcu_head *head) | 89 | static inline int debug_rcu_head_queue(struct rcu_head *head) |
87 | { | 90 | { |
91 | return 0; | ||
88 | } | 92 | } |
89 | 93 | ||
90 | static inline void debug_rcu_head_unqueue(struct rcu_head *head) | 94 | static inline void debug_rcu_head_unqueue(struct rcu_head *head) |
@@ -94,7 +98,7 @@ static inline void debug_rcu_head_unqueue(struct rcu_head *head) | |||
94 | 98 | ||
95 | extern void kfree(const void *); | 99 | extern void kfree(const void *); |
96 | 100 | ||
97 | static inline bool __rcu_reclaim(char *rn, struct rcu_head *head) | 101 | static inline bool __rcu_reclaim(const char *rn, struct rcu_head *head) |
98 | { | 102 | { |
99 | unsigned long offset = (unsigned long)head->func; | 103 | unsigned long offset = (unsigned long)head->func; |
100 | 104 | ||
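
A sketch of how the new int return from debug_rcu_head_queue() can be consumed (the caller below is hypothetical, illustrating the intent rather than quoting the patch): a nonzero result means the rcu_head was already queued, i.e. a double call_rcu(), so the caller can warn and drop the request instead of corrupting the callback list.

#include <linux/kernel.h>
#include "rcu.h"		/* kernel-internal header modified above */

static void demo_queue_callback(struct rcu_head *head,
				void (*func)(struct rcu_head *head))
{
	if (debug_rcu_head_queue(head)) {
		WARN_ONCE(1, "double call_rcu() or similar misuse detected\n");
		return;
	}
	head->func = func;
	/* ...link head onto the per-CPU callback list... */
}
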
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c index cce6ba8bbace..33eb4620aa17 100644 --- a/kernel/rcupdate.c +++ b/kernel/rcupdate.c | |||
@@ -212,43 +212,6 @@ static inline void debug_rcu_head_free(struct rcu_head *head) | |||
212 | } | 212 | } |
213 | 213 | ||
214 | /* | 214 | /* |
215 | * fixup_init is called when: | ||
216 | * - an active object is initialized | ||
217 | */ | ||
218 | static int rcuhead_fixup_init(void *addr, enum debug_obj_state state) | ||
219 | { | ||
220 | struct rcu_head *head = addr; | ||
221 | |||
222 | switch (state) { | ||
223 | case ODEBUG_STATE_ACTIVE: | ||
224 | /* | ||
225 | * Ensure that queued callbacks are all executed. | ||
226 | * If we detect that we are nested in a RCU read-side critical | ||
227 | * section, we should simply fail, otherwise we would deadlock. | ||
228 | * In !PREEMPT configurations, there is no way to tell if we are | ||
229 | * in a RCU read-side critical section or not, so we never | ||
230 | * attempt any fixup and just print a warning. | ||
231 | */ | ||
232 | #ifndef CONFIG_PREEMPT | ||
233 | WARN_ON_ONCE(1); | ||
234 | return 0; | ||
235 | #endif | ||
236 | if (rcu_preempt_depth() != 0 || preempt_count() != 0 || | ||
237 | irqs_disabled()) { | ||
238 | WARN_ON_ONCE(1); | ||
239 | return 0; | ||
240 | } | ||
241 | rcu_barrier(); | ||
242 | rcu_barrier_sched(); | ||
243 | rcu_barrier_bh(); | ||
244 | debug_object_init(head, &rcuhead_debug_descr); | ||
245 | return 1; | ||
246 | default: | ||
247 | return 0; | ||
248 | } | ||
249 | } | ||
250 | |||
251 | /* | ||
252 | * fixup_activate is called when: | 215 | * fixup_activate is called when: |
253 | * - an active object is activated | 216 | * - an active object is activated |
254 | * - an unknown object is activated (might be a statically initialized object) | 217 | * - an unknown object is activated (might be a statically initialized object) |
@@ -268,69 +231,8 @@ static int rcuhead_fixup_activate(void *addr, enum debug_obj_state state) | |||
268 | debug_object_init(head, &rcuhead_debug_descr); | 231 | debug_object_init(head, &rcuhead_debug_descr); |
269 | debug_object_activate(head, &rcuhead_debug_descr); | 232 | debug_object_activate(head, &rcuhead_debug_descr); |
270 | return 0; | 233 | return 0; |
271 | |||
272 | case ODEBUG_STATE_ACTIVE: | ||
273 | /* | ||
274 | * Ensure that queued callbacks are all executed. | ||
275 | * If we detect that we are nested in a RCU read-side critical | ||
276 | * section, we should simply fail, otherwise we would deadlock. | ||
277 | * In !PREEMPT configurations, there is no way to tell if we are | ||
278 | * in a RCU read-side critical section or not, so we never | ||
279 | * attempt any fixup and just print a warning. | ||
280 | */ | ||
281 | #ifndef CONFIG_PREEMPT | ||
282 | WARN_ON_ONCE(1); | ||
283 | return 0; | ||
284 | #endif | ||
285 | if (rcu_preempt_depth() != 0 || preempt_count() != 0 || | ||
286 | irqs_disabled()) { | ||
287 | WARN_ON_ONCE(1); | ||
288 | return 0; | ||
289 | } | ||
290 | rcu_barrier(); | ||
291 | rcu_barrier_sched(); | ||
292 | rcu_barrier_bh(); | ||
293 | debug_object_activate(head, &rcuhead_debug_descr); | ||
294 | return 1; | ||
295 | default: | 234 | default: |
296 | return 0; | ||
297 | } | ||
298 | } | ||
299 | |||
300 | /* | ||
301 | * fixup_free is called when: | ||
302 | * - an active object is freed | ||
303 | */ | ||
304 | static int rcuhead_fixup_free(void *addr, enum debug_obj_state state) | ||
305 | { | ||
306 | struct rcu_head *head = addr; | ||
307 | |||
308 | switch (state) { | ||
309 | case ODEBUG_STATE_ACTIVE: | ||
310 | /* | ||
311 | * Ensure that queued callbacks are all executed. | ||
312 | * If we detect that we are nested in a RCU read-side critical | ||
313 | * section, we should simply fail, otherwise we would deadlock. | ||
314 | * In !PREEMPT configurations, there is no way to tell if we are | ||
315 | * in a RCU read-side critical section or not, so we never | ||
316 | * attempt any fixup and just print a warning. | ||
317 | */ | ||
318 | #ifndef CONFIG_PREEMPT | ||
319 | WARN_ON_ONCE(1); | ||
320 | return 0; | ||
321 | #endif | ||
322 | if (rcu_preempt_depth() != 0 || preempt_count() != 0 || | ||
323 | irqs_disabled()) { | ||
324 | WARN_ON_ONCE(1); | ||
325 | return 0; | ||
326 | } | ||
327 | rcu_barrier(); | ||
328 | rcu_barrier_sched(); | ||
329 | rcu_barrier_bh(); | ||
330 | debug_object_free(head, &rcuhead_debug_descr); | ||
331 | return 1; | 235 | return 1; |
332 | default: | ||
333 | return 0; | ||
334 | } | 236 | } |
335 | } | 237 | } |
336 | 238 | ||
@@ -369,15 +271,13 @@ EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack); | |||
369 | 271 | ||
370 | struct debug_obj_descr rcuhead_debug_descr = { | 272 | struct debug_obj_descr rcuhead_debug_descr = { |
371 | .name = "rcu_head", | 273 | .name = "rcu_head", |
372 | .fixup_init = rcuhead_fixup_init, | ||
373 | .fixup_activate = rcuhead_fixup_activate, | 274 | .fixup_activate = rcuhead_fixup_activate, |
374 | .fixup_free = rcuhead_fixup_free, | ||
375 | }; | 275 | }; |
376 | EXPORT_SYMBOL_GPL(rcuhead_debug_descr); | 276 | EXPORT_SYMBOL_GPL(rcuhead_debug_descr); |
377 | #endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */ | 277 | #endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */ |
378 | 278 | ||
379 | #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE) | 279 | #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE) |
380 | void do_trace_rcu_torture_read(char *rcutorturename, struct rcu_head *rhp, | 280 | void do_trace_rcu_torture_read(const char *rcutorturename, struct rcu_head *rhp, |
381 | unsigned long secs, | 281 | unsigned long secs, |
382 | unsigned long c_old, unsigned long c) | 282 | unsigned long c_old, unsigned long c) |
383 | { | 283 | { |
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c index aa344111de3e..9ed6075dc562 100644 --- a/kernel/rcutiny.c +++ b/kernel/rcutiny.c | |||
@@ -264,7 +264,7 @@ void rcu_check_callbacks(int cpu, int user) | |||
264 | */ | 264 | */ |
265 | static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp) | 265 | static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp) |
266 | { | 266 | { |
267 | char *rn = NULL; | 267 | const char *rn = NULL; |
268 | struct rcu_head *next, *list; | 268 | struct rcu_head *next, *list; |
269 | unsigned long flags; | 269 | unsigned long flags; |
270 | RCU_TRACE(int cb_count = 0); | 270 | RCU_TRACE(int cb_count = 0); |
diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h index 0cd385acccfa..280d06cae352 100644 --- a/kernel/rcutiny_plugin.h +++ b/kernel/rcutiny_plugin.h | |||
@@ -36,7 +36,7 @@ struct rcu_ctrlblk { | |||
36 | RCU_TRACE(unsigned long gp_start); /* Start time for stalls. */ | 36 | RCU_TRACE(unsigned long gp_start); /* Start time for stalls. */ |
37 | RCU_TRACE(unsigned long ticks_this_gp); /* Statistic for stalls. */ | 37 | RCU_TRACE(unsigned long ticks_this_gp); /* Statistic for stalls. */ |
38 | RCU_TRACE(unsigned long jiffies_stall); /* Jiffies at next stall. */ | 38 | RCU_TRACE(unsigned long jiffies_stall); /* Jiffies at next stall. */ |
39 | RCU_TRACE(char *name); /* Name of RCU type. */ | 39 | RCU_TRACE(const char *name); /* Name of RCU type. */ |
40 | }; | 40 | }; |
41 | 41 | ||
42 | /* Definition for rcupdate control block. */ | 42 | /* Definition for rcupdate control block. */ |
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c index f4871e52c546..be63101c6175 100644 --- a/kernel/rcutorture.c +++ b/kernel/rcutorture.c | |||
@@ -52,72 +52,78 @@ | |||
52 | MODULE_LICENSE("GPL"); | 52 | MODULE_LICENSE("GPL"); |
53 | MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@freedesktop.org>"); | 53 | MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@freedesktop.org>"); |
54 | 54 | ||
55 | static int nreaders = -1; /* # reader threads, defaults to 2*ncpus */ | 55 | static int fqs_duration; |
56 | static int nfakewriters = 4; /* # fake writer threads */ | ||
57 | static int stat_interval = 60; /* Interval between stats, in seconds. */ | ||
58 | /* Zero means "only at end of test". */ | ||
59 | static bool verbose; /* Print more debug info. */ | ||
60 | static bool test_no_idle_hz = true; | ||
61 | /* Test RCU support for tickless idle CPUs. */ | ||
62 | static int shuffle_interval = 3; /* Interval between shuffles (in sec)*/ | ||
63 | static int stutter = 5; /* Start/stop testing interval (in sec) */ | ||
64 | static int irqreader = 1; /* RCU readers from irq (timers). */ | ||
65 | static int fqs_duration; /* Duration of bursts (us), 0 to disable. */ | ||
66 | static int fqs_holdoff; /* Hold time within burst (us). */ | ||
67 | static int fqs_stutter = 3; /* Wait time between bursts (s). */ | ||
68 | static int n_barrier_cbs; /* Number of callbacks to test RCU barriers. */ | ||
69 | static int onoff_interval; /* Wait time between CPU hotplugs, 0=disable. */ | ||
70 | static int onoff_holdoff; /* Seconds after boot before CPU hotplugs. */ | ||
71 | static int shutdown_secs; /* Shutdown time (s). <=0 for no shutdown. */ | ||
72 | static int stall_cpu; /* CPU-stall duration (s). 0 for no stall. */ | ||
73 | static int stall_cpu_holdoff = 10; /* Time to wait until stall (s). */ | ||
74 | static int test_boost = 1; /* Test RCU prio boost: 0=no, 1=maybe, 2=yes. */ | ||
75 | static int test_boost_interval = 7; /* Interval between boost tests, seconds. */ | ||
76 | static int test_boost_duration = 4; /* Duration of each boost test, seconds. */ | ||
77 | static char *torture_type = "rcu"; /* What RCU implementation to torture. */ | ||
78 | |||
79 | module_param(nreaders, int, 0444); | ||
80 | MODULE_PARM_DESC(nreaders, "Number of RCU reader threads"); | ||
81 | module_param(nfakewriters, int, 0444); | ||
82 | MODULE_PARM_DESC(nfakewriters, "Number of RCU fake writer threads"); | ||
83 | module_param(stat_interval, int, 0644); | ||
84 | MODULE_PARM_DESC(stat_interval, "Number of seconds between stats printk()s"); | ||
85 | module_param(verbose, bool, 0444); | ||
86 | MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s"); | ||
87 | module_param(test_no_idle_hz, bool, 0444); | ||
88 | MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs"); | ||
89 | module_param(shuffle_interval, int, 0444); | ||
90 | MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles"); | ||
91 | module_param(stutter, int, 0444); | ||
92 | MODULE_PARM_DESC(stutter, "Number of seconds to run/halt test"); | ||
93 | module_param(irqreader, int, 0444); | ||
94 | MODULE_PARM_DESC(irqreader, "Allow RCU readers from irq handlers"); | ||
95 | module_param(fqs_duration, int, 0444); | 56 | module_param(fqs_duration, int, 0444); |
96 | MODULE_PARM_DESC(fqs_duration, "Duration of fqs bursts (us)"); | 57 | MODULE_PARM_DESC(fqs_duration, "Duration of fqs bursts (us), 0 to disable"); |
58 | static int fqs_holdoff; | ||
97 | module_param(fqs_holdoff, int, 0444); | 59 | module_param(fqs_holdoff, int, 0444); |
98 | MODULE_PARM_DESC(fqs_holdoff, "Holdoff time within fqs bursts (us)"); | 60 | MODULE_PARM_DESC(fqs_holdoff, "Holdoff time within fqs bursts (us)"); |
61 | static int fqs_stutter = 3; | ||
99 | module_param(fqs_stutter, int, 0444); | 62 | module_param(fqs_stutter, int, 0444); |
100 | MODULE_PARM_DESC(fqs_stutter, "Wait time between fqs bursts (s)"); | 63 | MODULE_PARM_DESC(fqs_stutter, "Wait time between fqs bursts (s)"); |
64 | static bool gp_exp; | ||
65 | module_param(gp_exp, bool, 0444); | ||
66 | MODULE_PARM_DESC(gp_exp, "Use expedited GP wait primitives"); | ||
67 | static bool gp_normal; | ||
68 | module_param(gp_normal, bool, 0444); | ||
69 | MODULE_PARM_DESC(gp_normal, "Use normal (non-expedited) GP wait primitives"); | ||
70 | static int irqreader = 1; | ||
71 | module_param(irqreader, int, 0444); | ||
72 | MODULE_PARM_DESC(irqreader, "Allow RCU readers from irq handlers"); | ||
73 | static int n_barrier_cbs; | ||
101 | module_param(n_barrier_cbs, int, 0444); | 74 | module_param(n_barrier_cbs, int, 0444); |
102 | MODULE_PARM_DESC(n_barrier_cbs, "# of callbacks/kthreads for barrier testing"); | 75 | MODULE_PARM_DESC(n_barrier_cbs, "# of callbacks/kthreads for barrier testing"); |
103 | module_param(onoff_interval, int, 0444); | 76 | static int nfakewriters = 4; |
104 | MODULE_PARM_DESC(onoff_interval, "Time between CPU hotplugs (s), 0=disable"); | 77 | module_param(nfakewriters, int, 0444); |
78 | MODULE_PARM_DESC(nfakewriters, "Number of RCU fake writer threads"); | ||
79 | static int nreaders = -1; | ||
80 | module_param(nreaders, int, 0444); | ||
81 | MODULE_PARM_DESC(nreaders, "Number of RCU reader threads"); | ||
82 | static int object_debug; | ||
83 | module_param(object_debug, int, 0444); | ||
84 | MODULE_PARM_DESC(object_debug, "Enable debug-object double call_rcu() testing"); | ||
85 | static int onoff_holdoff; | ||
105 | module_param(onoff_holdoff, int, 0444); | 86 | module_param(onoff_holdoff, int, 0444); |
106 | MODULE_PARM_DESC(onoff_holdoff, "Time after boot before CPU hotplugs (s)"); | 87 | MODULE_PARM_DESC(onoff_holdoff, "Time after boot before CPU hotplugs (s)"); |
88 | static int onoff_interval; | ||
89 | module_param(onoff_interval, int, 0444); | ||
90 | MODULE_PARM_DESC(onoff_interval, "Time between CPU hotplugs (s), 0=disable"); | ||
91 | static int shuffle_interval = 3; | ||
92 | module_param(shuffle_interval, int, 0444); | ||
93 | MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles"); | ||
94 | static int shutdown_secs; | ||
107 | module_param(shutdown_secs, int, 0444); | 95 | module_param(shutdown_secs, int, 0444); |
108 | MODULE_PARM_DESC(shutdown_secs, "Shutdown time (s), zero to disable."); | 96 | MODULE_PARM_DESC(shutdown_secs, "Shutdown time (s), <= zero to disable."); |
97 | static int stall_cpu; | ||
109 | module_param(stall_cpu, int, 0444); | 98 | module_param(stall_cpu, int, 0444); |
110 | MODULE_PARM_DESC(stall_cpu, "Stall duration (s), zero to disable."); | 99 | MODULE_PARM_DESC(stall_cpu, "Stall duration (s), zero to disable."); |
100 | static int stall_cpu_holdoff = 10; | ||
111 | module_param(stall_cpu_holdoff, int, 0444); | 101 | module_param(stall_cpu_holdoff, int, 0444); |
112 | MODULE_PARM_DESC(stall_cpu_holdoff, "Time to wait before starting stall (s)."); | 102 | MODULE_PARM_DESC(stall_cpu_holdoff, "Time to wait before starting stall (s)."); |
103 | static int stat_interval = 60; | ||
104 | module_param(stat_interval, int, 0644); | ||
105 | MODULE_PARM_DESC(stat_interval, "Number of seconds between stats printk()s"); | ||
106 | static int stutter = 5; | ||
107 | module_param(stutter, int, 0444); | ||
108 | MODULE_PARM_DESC(stutter, "Number of seconds to run/halt test"); | ||
109 | static int test_boost = 1; | ||
113 | module_param(test_boost, int, 0444); | 110 | module_param(test_boost, int, 0444); |
114 | MODULE_PARM_DESC(test_boost, "Test RCU prio boost: 0=no, 1=maybe, 2=yes."); | 111 | MODULE_PARM_DESC(test_boost, "Test RCU prio boost: 0=no, 1=maybe, 2=yes."); |
115 | module_param(test_boost_interval, int, 0444); | 112 | static int test_boost_duration = 4; |
116 | MODULE_PARM_DESC(test_boost_interval, "Interval between boost tests, seconds."); | ||
117 | module_param(test_boost_duration, int, 0444); | 113 | module_param(test_boost_duration, int, 0444); |
118 | MODULE_PARM_DESC(test_boost_duration, "Duration of each boost test, seconds."); | 114 | MODULE_PARM_DESC(test_boost_duration, "Duration of each boost test, seconds."); |
115 | static int test_boost_interval = 7; | ||
116 | module_param(test_boost_interval, int, 0444); | ||
117 | MODULE_PARM_DESC(test_boost_interval, "Interval between boost tests, seconds."); | ||
118 | static bool test_no_idle_hz = true; | ||
119 | module_param(test_no_idle_hz, bool, 0444); | ||
120 | MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs"); | ||
121 | static char *torture_type = "rcu"; | ||
119 | module_param(torture_type, charp, 0444); | 122 | module_param(torture_type, charp, 0444); |
120 | MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, srcu)"); | 123 | MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, ...)"); |
124 | static bool verbose; | ||
125 | module_param(verbose, bool, 0444); | ||
126 | MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s"); | ||
121 | 127 | ||
122 | #define TORTURE_FLAG "-torture:" | 128 | #define TORTURE_FLAG "-torture:" |
123 | #define PRINTK_STRING(s) \ | 129 | #define PRINTK_STRING(s) \ |
@@ -267,7 +273,7 @@ rcutorture_shutdown_notify(struct notifier_block *unused1, | |||
267 | * Absorb kthreads into a kernel function that won't return, so that | 273 | * Absorb kthreads into a kernel function that won't return, so that |
268 | * they won't ever access module text or data again. | 274 | * they won't ever access module text or data again. |
269 | */ | 275 | */ |
270 | static void rcutorture_shutdown_absorb(char *title) | 276 | static void rcutorture_shutdown_absorb(const char *title) |
271 | { | 277 | { |
272 | if (ACCESS_ONCE(fullstop) == FULLSTOP_SHUTDOWN) { | 278 | if (ACCESS_ONCE(fullstop) == FULLSTOP_SHUTDOWN) { |
273 | pr_notice( | 279 | pr_notice( |
@@ -337,7 +343,7 @@ rcu_random(struct rcu_random_state *rrsp) | |||
337 | } | 343 | } |
338 | 344 | ||
339 | static void | 345 | static void |
340 | rcu_stutter_wait(char *title) | 346 | rcu_stutter_wait(const char *title) |
341 | { | 347 | { |
342 | while (stutter_pause_test || !rcutorture_runnable) { | 348 | while (stutter_pause_test || !rcutorture_runnable) { |
343 | if (rcutorture_runnable) | 349 | if (rcutorture_runnable) |
@@ -360,13 +366,14 @@ struct rcu_torture_ops { | |||
360 | int (*completed)(void); | 366 | int (*completed)(void); |
361 | void (*deferred_free)(struct rcu_torture *p); | 367 | void (*deferred_free)(struct rcu_torture *p); |
362 | void (*sync)(void); | 368 | void (*sync)(void); |
369 | void (*exp_sync)(void); | ||
363 | void (*call)(struct rcu_head *head, void (*func)(struct rcu_head *rcu)); | 370 | void (*call)(struct rcu_head *head, void (*func)(struct rcu_head *rcu)); |
364 | void (*cb_barrier)(void); | 371 | void (*cb_barrier)(void); |
365 | void (*fqs)(void); | 372 | void (*fqs)(void); |
366 | int (*stats)(char *page); | 373 | int (*stats)(char *page); |
367 | int irq_capable; | 374 | int irq_capable; |
368 | int can_boost; | 375 | int can_boost; |
369 | char *name; | 376 | const char *name; |
370 | }; | 377 | }; |
371 | 378 | ||
372 | static struct rcu_torture_ops *cur_ops; | 379 | static struct rcu_torture_ops *cur_ops; |
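
With exp_sync added to rcu_torture_ops and the new gp_exp/gp_normal module parameters, the writer can mix normal and expedited grace periods. A sketch of the selection logic, a simplification of what rcu_torture_writer() does later in this patch; the helper name is illustrative.

static void demo_wait_for_gp(struct rcu_torture_ops *ops,
			     struct rcu_random_state *rrsp)
{
	bool exp;

	if (gp_normal == gp_exp)
		exp = !!(rcu_random(rrsp) & 0x80);	/* both (or neither) set: mix them */
	else
		exp = gp_exp;

	if (exp)
		ops->exp_sync();	/* e.g. synchronize_rcu_expedited() */
	else
		ops->sync();		/* e.g. synchronize_rcu() */
}
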
@@ -443,81 +450,27 @@ static void rcu_torture_deferred_free(struct rcu_torture *p) | |||
443 | call_rcu(&p->rtort_rcu, rcu_torture_cb); | 450 | call_rcu(&p->rtort_rcu, rcu_torture_cb); |
444 | } | 451 | } |
445 | 452 | ||
446 | static struct rcu_torture_ops rcu_ops = { | ||
447 | .init = NULL, | ||
448 | .readlock = rcu_torture_read_lock, | ||
449 | .read_delay = rcu_read_delay, | ||
450 | .readunlock = rcu_torture_read_unlock, | ||
451 | .completed = rcu_torture_completed, | ||
452 | .deferred_free = rcu_torture_deferred_free, | ||
453 | .sync = synchronize_rcu, | ||
454 | .call = call_rcu, | ||
455 | .cb_barrier = rcu_barrier, | ||
456 | .fqs = rcu_force_quiescent_state, | ||
457 | .stats = NULL, | ||
458 | .irq_capable = 1, | ||
459 | .can_boost = rcu_can_boost(), | ||
460 | .name = "rcu" | ||
461 | }; | ||
462 | |||
463 | static void rcu_sync_torture_deferred_free(struct rcu_torture *p) | ||
464 | { | ||
465 | int i; | ||
466 | struct rcu_torture *rp; | ||
467 | struct rcu_torture *rp1; | ||
468 | |||
469 | cur_ops->sync(); | ||
470 | list_add(&p->rtort_free, &rcu_torture_removed); | ||
471 | list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) { | ||
472 | i = rp->rtort_pipe_count; | ||
473 | if (i > RCU_TORTURE_PIPE_LEN) | ||
474 | i = RCU_TORTURE_PIPE_LEN; | ||
475 | atomic_inc(&rcu_torture_wcount[i]); | ||
476 | if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) { | ||
477 | rp->rtort_mbtest = 0; | ||
478 | list_del(&rp->rtort_free); | ||
479 | rcu_torture_free(rp); | ||
480 | } | ||
481 | } | ||
482 | } | ||
483 | |||
484 | static void rcu_sync_torture_init(void) | 453 | static void rcu_sync_torture_init(void) |
485 | { | 454 | { |
486 | INIT_LIST_HEAD(&rcu_torture_removed); | 455 | INIT_LIST_HEAD(&rcu_torture_removed); |
487 | } | 456 | } |
488 | 457 | ||
489 | static struct rcu_torture_ops rcu_sync_ops = { | 458 | static struct rcu_torture_ops rcu_ops = { |
490 | .init = rcu_sync_torture_init, | 459 | .init = rcu_sync_torture_init, |
491 | .readlock = rcu_torture_read_lock, | 460 | .readlock = rcu_torture_read_lock, |
492 | .read_delay = rcu_read_delay, | 461 | .read_delay = rcu_read_delay, |
493 | .readunlock = rcu_torture_read_unlock, | 462 | .readunlock = rcu_torture_read_unlock, |
494 | .completed = rcu_torture_completed, | 463 | .completed = rcu_torture_completed, |
495 | .deferred_free = rcu_sync_torture_deferred_free, | 464 | .deferred_free = rcu_torture_deferred_free, |
496 | .sync = synchronize_rcu, | 465 | .sync = synchronize_rcu, |
497 | .call = NULL, | 466 | .exp_sync = synchronize_rcu_expedited, |
498 | .cb_barrier = NULL, | 467 | .call = call_rcu, |
499 | .fqs = rcu_force_quiescent_state, | 468 | .cb_barrier = rcu_barrier, |
500 | .stats = NULL, | ||
501 | .irq_capable = 1, | ||
502 | .can_boost = rcu_can_boost(), | ||
503 | .name = "rcu_sync" | ||
504 | }; | ||
505 | |||
506 | static struct rcu_torture_ops rcu_expedited_ops = { | ||
507 | .init = rcu_sync_torture_init, | ||
508 | .readlock = rcu_torture_read_lock, | ||
509 | .read_delay = rcu_read_delay, /* just reuse rcu's version. */ | ||
510 | .readunlock = rcu_torture_read_unlock, | ||
511 | .completed = rcu_no_completed, | ||
512 | .deferred_free = rcu_sync_torture_deferred_free, | ||
513 | .sync = synchronize_rcu_expedited, | ||
514 | .call = NULL, | ||
515 | .cb_barrier = NULL, | ||
516 | .fqs = rcu_force_quiescent_state, | 469 | .fqs = rcu_force_quiescent_state, |
517 | .stats = NULL, | 470 | .stats = NULL, |
518 | .irq_capable = 1, | 471 | .irq_capable = 1, |
519 | .can_boost = rcu_can_boost(), | 472 | .can_boost = rcu_can_boost(), |
520 | .name = "rcu_expedited" | 473 | .name = "rcu" |
521 | }; | 474 | }; |
522 | 475 | ||
523 | /* | 476 | /* |
@@ -546,13 +499,14 @@ static void rcu_bh_torture_deferred_free(struct rcu_torture *p) | |||
546 | } | 499 | } |
547 | 500 | ||
548 | static struct rcu_torture_ops rcu_bh_ops = { | 501 | static struct rcu_torture_ops rcu_bh_ops = { |
549 | .init = NULL, | 502 | .init = rcu_sync_torture_init, |
550 | .readlock = rcu_bh_torture_read_lock, | 503 | .readlock = rcu_bh_torture_read_lock, |
551 | .read_delay = rcu_read_delay, /* just reuse rcu's version. */ | 504 | .read_delay = rcu_read_delay, /* just reuse rcu's version. */ |
552 | .readunlock = rcu_bh_torture_read_unlock, | 505 | .readunlock = rcu_bh_torture_read_unlock, |
553 | .completed = rcu_bh_torture_completed, | 506 | .completed = rcu_bh_torture_completed, |
554 | .deferred_free = rcu_bh_torture_deferred_free, | 507 | .deferred_free = rcu_bh_torture_deferred_free, |
555 | .sync = synchronize_rcu_bh, | 508 | .sync = synchronize_rcu_bh, |
509 | .exp_sync = synchronize_rcu_bh_expedited, | ||
556 | .call = call_rcu_bh, | 510 | .call = call_rcu_bh, |
557 | .cb_barrier = rcu_barrier_bh, | 511 | .cb_barrier = rcu_barrier_bh, |
558 | .fqs = rcu_bh_force_quiescent_state, | 512 | .fqs = rcu_bh_force_quiescent_state, |
@@ -561,38 +515,6 @@ static struct rcu_torture_ops rcu_bh_ops = { | |||
561 | .name = "rcu_bh" | 515 | .name = "rcu_bh" |
562 | }; | 516 | }; |
563 | 517 | ||
564 | static struct rcu_torture_ops rcu_bh_sync_ops = { | ||
565 | .init = rcu_sync_torture_init, | ||
566 | .readlock = rcu_bh_torture_read_lock, | ||
567 | .read_delay = rcu_read_delay, /* just reuse rcu's version. */ | ||
568 | .readunlock = rcu_bh_torture_read_unlock, | ||
569 | .completed = rcu_bh_torture_completed, | ||
570 | .deferred_free = rcu_sync_torture_deferred_free, | ||
571 | .sync = synchronize_rcu_bh, | ||
572 | .call = NULL, | ||
573 | .cb_barrier = NULL, | ||
574 | .fqs = rcu_bh_force_quiescent_state, | ||
575 | .stats = NULL, | ||
576 | .irq_capable = 1, | ||
577 | .name = "rcu_bh_sync" | ||
578 | }; | ||
579 | |||
580 | static struct rcu_torture_ops rcu_bh_expedited_ops = { | ||
581 | .init = rcu_sync_torture_init, | ||
582 | .readlock = rcu_bh_torture_read_lock, | ||
583 | .read_delay = rcu_read_delay, /* just reuse rcu's version. */ | ||
584 | .readunlock = rcu_bh_torture_read_unlock, | ||
585 | .completed = rcu_bh_torture_completed, | ||
586 | .deferred_free = rcu_sync_torture_deferred_free, | ||
587 | .sync = synchronize_rcu_bh_expedited, | ||
588 | .call = NULL, | ||
589 | .cb_barrier = NULL, | ||
590 | .fqs = rcu_bh_force_quiescent_state, | ||
591 | .stats = NULL, | ||
592 | .irq_capable = 1, | ||
593 | .name = "rcu_bh_expedited" | ||
594 | }; | ||
595 | |||
596 | /* | 518 | /* |
597 | * Definitions for srcu torture testing. | 519 | * Definitions for srcu torture testing. |
598 | */ | 520 | */ |
@@ -667,6 +589,11 @@ static int srcu_torture_stats(char *page) | |||
667 | return cnt; | 589 | return cnt; |
668 | } | 590 | } |
669 | 591 | ||
592 | static void srcu_torture_synchronize_expedited(void) | ||
593 | { | ||
594 | synchronize_srcu_expedited(&srcu_ctl); | ||
595 | } | ||
596 | |||
670 | static struct rcu_torture_ops srcu_ops = { | 597 | static struct rcu_torture_ops srcu_ops = { |
671 | .init = rcu_sync_torture_init, | 598 | .init = rcu_sync_torture_init, |
672 | .readlock = srcu_torture_read_lock, | 599 | .readlock = srcu_torture_read_lock, |
@@ -675,45 +602,13 @@ static struct rcu_torture_ops srcu_ops = { | |||
675 | .completed = srcu_torture_completed, | 602 | .completed = srcu_torture_completed, |
676 | .deferred_free = srcu_torture_deferred_free, | 603 | .deferred_free = srcu_torture_deferred_free, |
677 | .sync = srcu_torture_synchronize, | 604 | .sync = srcu_torture_synchronize, |
605 | .exp_sync = srcu_torture_synchronize_expedited, | ||
678 | .call = srcu_torture_call, | 606 | .call = srcu_torture_call, |
679 | .cb_barrier = srcu_torture_barrier, | 607 | .cb_barrier = srcu_torture_barrier, |
680 | .stats = srcu_torture_stats, | 608 | .stats = srcu_torture_stats, |
681 | .name = "srcu" | 609 | .name = "srcu" |
682 | }; | 610 | }; |
683 | 611 | ||
684 | static struct rcu_torture_ops srcu_sync_ops = { | ||
685 | .init = rcu_sync_torture_init, | ||
686 | .readlock = srcu_torture_read_lock, | ||
687 | .read_delay = srcu_read_delay, | ||
688 | .readunlock = srcu_torture_read_unlock, | ||
689 | .completed = srcu_torture_completed, | ||
690 | .deferred_free = rcu_sync_torture_deferred_free, | ||
691 | .sync = srcu_torture_synchronize, | ||
692 | .call = NULL, | ||
693 | .cb_barrier = NULL, | ||
694 | .stats = srcu_torture_stats, | ||
695 | .name = "srcu_sync" | ||
696 | }; | ||
697 | |||
698 | static void srcu_torture_synchronize_expedited(void) | ||
699 | { | ||
700 | synchronize_srcu_expedited(&srcu_ctl); | ||
701 | } | ||
702 | |||
703 | static struct rcu_torture_ops srcu_expedited_ops = { | ||
704 | .init = rcu_sync_torture_init, | ||
705 | .readlock = srcu_torture_read_lock, | ||
706 | .read_delay = srcu_read_delay, | ||
707 | .readunlock = srcu_torture_read_unlock, | ||
708 | .completed = srcu_torture_completed, | ||
709 | .deferred_free = rcu_sync_torture_deferred_free, | ||
710 | .sync = srcu_torture_synchronize_expedited, | ||
711 | .call = NULL, | ||
712 | .cb_barrier = NULL, | ||
713 | .stats = srcu_torture_stats, | ||
714 | .name = "srcu_expedited" | ||
715 | }; | ||
716 | |||
717 | /* | 612 | /* |
718 | * Definitions for sched torture testing. | 613 | * Definitions for sched torture testing. |
719 | */ | 614 | */ |
@@ -742,6 +637,8 @@ static struct rcu_torture_ops sched_ops = { | |||
742 | .completed = rcu_no_completed, | 637 | .completed = rcu_no_completed, |
743 | .deferred_free = rcu_sched_torture_deferred_free, | 638 | .deferred_free = rcu_sched_torture_deferred_free, |
744 | .sync = synchronize_sched, | 639 | .sync = synchronize_sched, |
640 | .exp_sync = synchronize_sched_expedited, | ||
641 | .call = call_rcu_sched, | ||
745 | .cb_barrier = rcu_barrier_sched, | 642 | .cb_barrier = rcu_barrier_sched, |
746 | .fqs = rcu_sched_force_quiescent_state, | 643 | .fqs = rcu_sched_force_quiescent_state, |
747 | .stats = NULL, | 644 | .stats = NULL, |
@@ -749,35 +646,6 @@ static struct rcu_torture_ops sched_ops = { | |||
749 | .name = "sched" | 646 | .name = "sched" |
750 | }; | 647 | }; |
751 | 648 | ||
752 | static struct rcu_torture_ops sched_sync_ops = { | ||
753 | .init = rcu_sync_torture_init, | ||
754 | .readlock = sched_torture_read_lock, | ||
755 | .read_delay = rcu_read_delay, /* just reuse rcu's version. */ | ||
756 | .readunlock = sched_torture_read_unlock, | ||
757 | .completed = rcu_no_completed, | ||
758 | .deferred_free = rcu_sync_torture_deferred_free, | ||
759 | .sync = synchronize_sched, | ||
760 | .cb_barrier = NULL, | ||
761 | .fqs = rcu_sched_force_quiescent_state, | ||
762 | .stats = NULL, | ||
763 | .name = "sched_sync" | ||
764 | }; | ||
765 | |||
766 | static struct rcu_torture_ops sched_expedited_ops = { | ||
767 | .init = rcu_sync_torture_init, | ||
768 | .readlock = sched_torture_read_lock, | ||
769 | .read_delay = rcu_read_delay, /* just reuse rcu's version. */ | ||
770 | .readunlock = sched_torture_read_unlock, | ||
771 | .completed = rcu_no_completed, | ||
772 | .deferred_free = rcu_sync_torture_deferred_free, | ||
773 | .sync = synchronize_sched_expedited, | ||
774 | .cb_barrier = NULL, | ||
775 | .fqs = rcu_sched_force_quiescent_state, | ||
776 | .stats = NULL, | ||
777 | .irq_capable = 1, | ||
778 | .name = "sched_expedited" | ||
779 | }; | ||
780 | |||
781 | /* | 649 | /* |
782 | * RCU torture priority-boost testing. Runs one real-time thread per | 650 | * RCU torture priority-boost testing. Runs one real-time thread per |
783 | * CPU for moderate bursts, repeatedly registering RCU callbacks and | 651 | * CPU for moderate bursts, repeatedly registering RCU callbacks and |
@@ -927,9 +795,10 @@ rcu_torture_fqs(void *arg) | |||
927 | static int | 795 | static int |
928 | rcu_torture_writer(void *arg) | 796 | rcu_torture_writer(void *arg) |
929 | { | 797 | { |
798 | bool exp; | ||
930 | int i; | 799 | int i; |
931 | long oldbatch = rcu_batches_completed(); | ||
932 | struct rcu_torture *rp; | 800 | struct rcu_torture *rp; |
801 | struct rcu_torture *rp1; | ||
933 | struct rcu_torture *old_rp; | 802 | struct rcu_torture *old_rp; |
934 | static DEFINE_RCU_RANDOM(rand); | 803 | static DEFINE_RCU_RANDOM(rand); |
935 | 804 | ||
@@ -954,10 +823,33 @@ rcu_torture_writer(void *arg) | |||
954 | i = RCU_TORTURE_PIPE_LEN; | 823 | i = RCU_TORTURE_PIPE_LEN; |
955 | atomic_inc(&rcu_torture_wcount[i]); | 824 | atomic_inc(&rcu_torture_wcount[i]); |
956 | old_rp->rtort_pipe_count++; | 825 | old_rp->rtort_pipe_count++; |
957 | cur_ops->deferred_free(old_rp); | 826 | if (gp_normal == gp_exp) |
827 | exp = !!(rcu_random(&rand) & 0x80); | ||
828 | else | ||
829 | exp = gp_exp; | ||
830 | if (!exp) { | ||
831 | cur_ops->deferred_free(old_rp); | ||
832 | } else { | ||
833 | cur_ops->exp_sync(); | ||
834 | list_add(&old_rp->rtort_free, | ||
835 | &rcu_torture_removed); | ||
836 | list_for_each_entry_safe(rp, rp1, | ||
837 | &rcu_torture_removed, | ||
838 | rtort_free) { | ||
839 | i = rp->rtort_pipe_count; | ||
840 | if (i > RCU_TORTURE_PIPE_LEN) | ||
841 | i = RCU_TORTURE_PIPE_LEN; | ||
842 | atomic_inc(&rcu_torture_wcount[i]); | ||
843 | if (++rp->rtort_pipe_count >= | ||
844 | RCU_TORTURE_PIPE_LEN) { | ||
845 | rp->rtort_mbtest = 0; | ||
846 | list_del(&rp->rtort_free); | ||
847 | rcu_torture_free(rp); | ||
848 | } | ||
849 | } | ||
850 | } | ||
958 | } | 851 | } |
959 | rcutorture_record_progress(++rcu_torture_current_version); | 852 | rcutorture_record_progress(++rcu_torture_current_version); |
960 | oldbatch = cur_ops->completed(); | ||
961 | rcu_stutter_wait("rcu_torture_writer"); | 853 | rcu_stutter_wait("rcu_torture_writer"); |
962 | } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP); | 854 | } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP); |
963 | VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping"); | 855 | VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping"); |
@@ -983,10 +875,18 @@ rcu_torture_fakewriter(void *arg) | |||
983 | schedule_timeout_uninterruptible(1 + rcu_random(&rand)%10); | 875 | schedule_timeout_uninterruptible(1 + rcu_random(&rand)%10); |
984 | udelay(rcu_random(&rand) & 0x3ff); | 876 | udelay(rcu_random(&rand) & 0x3ff); |
985 | if (cur_ops->cb_barrier != NULL && | 877 | if (cur_ops->cb_barrier != NULL && |
986 | rcu_random(&rand) % (nfakewriters * 8) == 0) | 878 | rcu_random(&rand) % (nfakewriters * 8) == 0) { |
987 | cur_ops->cb_barrier(); | 879 | cur_ops->cb_barrier(); |
988 | else | 880 | } else if (gp_normal == gp_exp) { |
881 | if (rcu_random(&rand) & 0x80) | ||
882 | cur_ops->sync(); | ||
883 | else | ||
884 | cur_ops->exp_sync(); | ||
885 | } else if (gp_normal) { | ||
989 | cur_ops->sync(); | 886 | cur_ops->sync(); |
887 | } else { | ||
888 | cur_ops->exp_sync(); | ||
889 | } | ||
990 | rcu_stutter_wait("rcu_torture_fakewriter"); | 890 | rcu_stutter_wait("rcu_torture_fakewriter"); |
991 | } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP); | 891 | } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP); |
992 | 892 | ||
@@ -1364,7 +1264,7 @@ rcu_torture_stutter(void *arg) | |||
1364 | } | 1264 | } |
1365 | 1265 | ||
1366 | static inline void | 1266 | static inline void |
1367 | rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, char *tag) | 1267 | rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag) |
1368 | { | 1268 | { |
1369 | pr_alert("%s" TORTURE_FLAG | 1269 | pr_alert("%s" TORTURE_FLAG |
1370 | "--- %s: nreaders=%d nfakewriters=%d " | 1270 | "--- %s: nreaders=%d nfakewriters=%d " |
@@ -1534,7 +1434,13 @@ rcu_torture_onoff(void *arg) | |||
1534 | torture_type, cpu); | 1434 | torture_type, cpu); |
1535 | starttime = jiffies; | 1435 | starttime = jiffies; |
1536 | n_online_attempts++; | 1436 | n_online_attempts++; |
1537 | if (cpu_up(cpu) == 0) { | 1437 | ret = cpu_up(cpu); |
1438 | if (ret) { | ||
1439 | if (verbose) | ||
1440 | pr_alert("%s" TORTURE_FLAG | ||
1441 | "rcu_torture_onoff task: online %d failed: errno %d\n", | ||
1442 | torture_type, cpu, ret); | ||
1443 | } else { | ||
1538 | if (verbose) | 1444 | if (verbose) |
1539 | pr_alert("%s" TORTURE_FLAG | 1445 | pr_alert("%s" TORTURE_FLAG |
1540 | "rcu_torture_onoff task: onlined %d\n", | 1446 | "rcu_torture_onoff task: onlined %d\n", |
@@ -1934,6 +1840,62 @@ rcu_torture_cleanup(void) | |||
1934 | rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS"); | 1840 | rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS"); |
1935 | } | 1841 | } |
1936 | 1842 | ||
1843 | #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD | ||
1844 | static void rcu_torture_leak_cb(struct rcu_head *rhp) | ||
1845 | { | ||
1846 | } | ||
1847 | |||
1848 | static void rcu_torture_err_cb(struct rcu_head *rhp) | ||
1849 | { | ||
1850 | /* | ||
1851 | * This -might- happen due to race conditions, but is unlikely. | ||
1852 | * The scenario that leads to this happening is that the | ||
1853 | * first of the pair of duplicate callbacks is queued, | ||
1854 | * someone else starts a grace period that includes that | ||
1855 | * callback, then the second of the pair must wait for the | ||
1856 | * next grace period. Unlikely, but can happen. If it | ||
1857 | * does happen, the debug-objects subsystem won't have splatted. | ||
1858 | */ | ||
1859 | pr_alert("rcutorture: duplicated callback was invoked.\n"); | ||
1860 | } | ||
1861 | #endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */ | ||
1862 | |||
1863 | /* | ||
1864 | * Verify that double-free causes debug-objects to complain, but only | ||
1865 | * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y. Otherwise, say that the test | ||
1866 | * cannot be carried out. | ||
1867 | */ | ||
1868 | static void rcu_test_debug_objects(void) | ||
1869 | { | ||
1870 | #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD | ||
1871 | struct rcu_head rh1; | ||
1872 | struct rcu_head rh2; | ||
1873 | |||
1874 | init_rcu_head_on_stack(&rh1); | ||
1875 | init_rcu_head_on_stack(&rh2); | ||
1876 | pr_alert("rcutorture: WARN: Duplicate call_rcu() test starting.\n"); | ||
1877 | |||
1878 | /* Try to queue the rh2 pair of callbacks for the same grace period. */ | ||
1879 | preempt_disable(); /* Prevent preemption from interrupting test. */ | ||
1880 | rcu_read_lock(); /* Make it impossible to finish a grace period. */ | ||
1881 | call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */ | ||
1882 | local_irq_disable(); /* Make it harder to start a new grace period. */ | ||
1883 | call_rcu(&rh2, rcu_torture_leak_cb); | ||
1884 | call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */ | ||
1885 | local_irq_enable(); | ||
1886 | rcu_read_unlock(); | ||
1887 | preempt_enable(); | ||
1888 | |||
1889 | /* Wait for them all to get done so we can safely return. */ | ||
1890 | rcu_barrier(); | ||
1891 | pr_alert("rcutorture: WARN: Duplicate call_rcu() test complete.\n"); | ||
1892 | destroy_rcu_head_on_stack(&rh1); | ||
1893 | destroy_rcu_head_on_stack(&rh2); | ||
1894 | #else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */ | ||
1895 | pr_alert("rcutorture: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n"); | ||
1896 | #endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */ | ||
1897 | } | ||
1898 | |||
1937 | static int __init | 1899 | static int __init |
1938 | rcu_torture_init(void) | 1900 | rcu_torture_init(void) |
1939 | { | 1901 | { |
@@ -1941,11 +1903,9 @@ rcu_torture_init(void) | |||
1941 | int cpu; | 1903 | int cpu; |
1942 | int firsterr = 0; | 1904 | int firsterr = 0; |
1943 | int retval; | 1905 | int retval; |
1944 | static struct rcu_torture_ops *torture_ops[] = | 1906 | static struct rcu_torture_ops *torture_ops[] = { |
1945 | { &rcu_ops, &rcu_sync_ops, &rcu_expedited_ops, | 1907 | &rcu_ops, &rcu_bh_ops, &srcu_ops, &sched_ops, |
1946 | &rcu_bh_ops, &rcu_bh_sync_ops, &rcu_bh_expedited_ops, | 1908 | }; |
1947 | &srcu_ops, &srcu_sync_ops, &srcu_expedited_ops, | ||
1948 | &sched_ops, &sched_sync_ops, &sched_expedited_ops, }; | ||
1949 | 1909 | ||
1950 | mutex_lock(&fullstop_mutex); | 1910 | mutex_lock(&fullstop_mutex); |
1951 | 1911 | ||
@@ -2163,6 +2123,8 @@ rcu_torture_init(void) | |||
2163 | firsterr = retval; | 2123 | firsterr = retval; |
2164 | goto unwind; | 2124 | goto unwind; |
2165 | } | 2125 | } |
2126 | if (object_debug) | ||
2127 | rcu_test_debug_objects(); | ||
2166 | rcutorture_record_test_transition(); | 2128 | rcutorture_record_test_transition(); |
2167 | mutex_unlock(&fullstop_mutex); | 2129 | mutex_unlock(&fullstop_mutex); |
2168 | return 0; | 2130 | return 0; |
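The rcutorture changes above fold the separate *_sync and *_expedited ops variants into one per-flavor ops structure carrying both .sync and .exp_sync, with the writer and fakewriter kthreads choosing between them at random unless the gp_normal/gp_exp flags force one. A minimal standalone sketch of that selection pattern follows; demo_ops, do_one_gp() and the fake_*() functions are invented for illustration and are not the kernel's rcu_torture_ops:

	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct gp_ops {
		const char *name;
		void (*sync)(void);	/* normal grace-period wait */
		void (*exp_sync)(void);	/* expedited grace-period wait */
	};

	static void fake_sync(void)     { puts("normal grace period"); }
	static void fake_exp_sync(void) { puts("expedited grace period"); }

	static struct gp_ops demo_ops = {
		.name = "demo", .sync = fake_sync, .exp_sync = fake_exp_sync,
	};

	/* If the two knobs agree (both set or both clear), pick at random,
	 * mirroring the "gp_normal == gp_exp" test in rcu_torture_writer();
	 * otherwise honor whichever knob was set. */
	static void do_one_gp(struct gp_ops *ops, bool gp_normal, bool gp_exp)
	{
		bool exp;

		if (gp_normal == gp_exp)
			exp = rand() & 0x80;
		else
			exp = gp_exp;
		if (exp)
			ops->exp_sync();
		else
			ops->sync();
	}

	int main(void)
	{
		int i;

		for (i = 0; i < 4; i++)
			do_one_gp(&demo_ops, false, false);
		return 0;
	}

Because a single ops structure now advertises both grace-period primitives, the torture_ops[] array can shrink to one entry per flavor, as the rcu_torture_init() hunk above shows.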
diff --git a/kernel/rcutree.c b/kernel/rcutree.c index 068de3a93606..32618b3fe4e6 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c | |||
@@ -53,18 +53,38 @@ | |||
53 | #include <linux/delay.h> | 53 | #include <linux/delay.h> |
54 | #include <linux/stop_machine.h> | 54 | #include <linux/stop_machine.h> |
55 | #include <linux/random.h> | 55 | #include <linux/random.h> |
56 | #include <linux/ftrace_event.h> | ||
57 | #include <linux/suspend.h> | ||
56 | 58 | ||
57 | #include "rcutree.h" | 59 | #include "rcutree.h" |
58 | #include <trace/events/rcu.h> | 60 | #include <trace/events/rcu.h> |
59 | 61 | ||
60 | #include "rcu.h" | 62 | #include "rcu.h" |
61 | 63 | ||
64 | /* | ||
65 | * Strings used in tracepoints need to be exported via the | ||
66 | * tracing system such that tools like perf and trace-cmd can | ||
67 | * translate the string address pointers to actual text. | ||
68 | */ | ||
69 | #define TPS(x) tracepoint_string(x) | ||
70 | |||
62 | /* Data structures. */ | 71 | /* Data structures. */ |
63 | 72 | ||
64 | static struct lock_class_key rcu_node_class[RCU_NUM_LVLS]; | 73 | static struct lock_class_key rcu_node_class[RCU_NUM_LVLS]; |
65 | static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS]; | 74 | static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS]; |
66 | 75 | ||
67 | #define RCU_STATE_INITIALIZER(sname, sabbr, cr) { \ | 76 | /* |
77 | * In order to export the rcu_state name to the tracing tools, it | ||
78 | * needs to be added in the __tracepoint_string section. | ||
79 | * This requires defining a separate variable tp_<sname>_varname | ||
80 | * that points to the string being used, and this will allow | ||
81 | * the tracing userspace tools to be able to decipher the string | ||
82 | * address to the matching string. | ||
83 | */ | ||
84 | #define RCU_STATE_INITIALIZER(sname, sabbr, cr) \ | ||
85 | static char sname##_varname[] = #sname; \ | ||
86 | static const char *tp_##sname##_varname __used __tracepoint_string = sname##_varname; \ | ||
87 | struct rcu_state sname##_state = { \ | ||
68 | .level = { &sname##_state.node[0] }, \ | 88 | .level = { &sname##_state.node[0] }, \ |
69 | .call = cr, \ | 89 | .call = cr, \ |
70 | .fqs_state = RCU_GP_IDLE, \ | 90 | .fqs_state = RCU_GP_IDLE, \ |
@@ -75,16 +95,13 @@ static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS]; | |||
75 | .orphan_donetail = &sname##_state.orphan_donelist, \ | 95 | .orphan_donetail = &sname##_state.orphan_donelist, \ |
76 | .barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \ | 96 | .barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \ |
77 | .onoff_mutex = __MUTEX_INITIALIZER(sname##_state.onoff_mutex), \ | 97 | .onoff_mutex = __MUTEX_INITIALIZER(sname##_state.onoff_mutex), \ |
78 | .name = #sname, \ | 98 | .name = sname##_varname, \ |
79 | .abbr = sabbr, \ | 99 | .abbr = sabbr, \ |
80 | } | 100 | }; \ |
81 | 101 | DEFINE_PER_CPU(struct rcu_data, sname##_data) | |
82 | struct rcu_state rcu_sched_state = | ||
83 | RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched); | ||
84 | DEFINE_PER_CPU(struct rcu_data, rcu_sched_data); | ||
85 | 102 | ||
86 | struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh, 'b', call_rcu_bh); | 103 | RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched); |
87 | DEFINE_PER_CPU(struct rcu_data, rcu_bh_data); | 104 | RCU_STATE_INITIALIZER(rcu_bh, 'b', call_rcu_bh); |
88 | 105 | ||
89 | static struct rcu_state *rcu_state; | 106 | static struct rcu_state *rcu_state; |
90 | LIST_HEAD(rcu_struct_flavors); | 107 | LIST_HEAD(rcu_struct_flavors); |
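The TPS()/__tracepoint_string machinery introduced above works by keeping a pointer to each constant string in a dedicated ELF section, so a tracing tool can walk that section once and map the addresses recorded in the trace buffer back to text. A rough userspace analogue of the pattern, assuming a GNU toolchain on ELF; REGISTER_STRING and the demo_strings section name are invented for illustration and are not the kernel API:

	#include <stdio.h>

	/* Each registered string gets a pointer stored in the "demo_strings"
	 * section; the string data itself lives wherever the compiler puts it. */
	#define REGISTER_STRING(var, text)					\
		static const char var##_str[] = text;				\
		static const char *var						\
			__attribute__((section("demo_strings"), used)) = var##_str

	REGISTER_STRING(tp_start, "Start context switch");
	REGISTER_STRING(tp_end, "End context switch");

	/* The linker provides these bounds for sections named like C identifiers. */
	extern const char *__start_demo_strings[];
	extern const char *__stop_demo_strings[];

	int main(void)
	{
		const char **p;

		/* Walk the section and print the address -> text mapping that a
		 * tracing tool would build, instead of copying the string into
		 * every trace record. */
		for (p = __start_demo_strings; p < __stop_demo_strings; p++)
			printf("%p -> %s\n", (const void *)*p, *p);
		return 0;
	}

This is why RCU_STATE_INITIALIZER() now defines both sname##_varname and tp_##sname##_varname: the rcu_state .name field and the tracepoints end up pointing at the very same registered string.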
@@ -178,7 +195,7 @@ void rcu_sched_qs(int cpu) | |||
178 | struct rcu_data *rdp = &per_cpu(rcu_sched_data, cpu); | 195 | struct rcu_data *rdp = &per_cpu(rcu_sched_data, cpu); |
179 | 196 | ||
180 | if (rdp->passed_quiesce == 0) | 197 | if (rdp->passed_quiesce == 0) |
181 | trace_rcu_grace_period("rcu_sched", rdp->gpnum, "cpuqs"); | 198 | trace_rcu_grace_period(TPS("rcu_sched"), rdp->gpnum, TPS("cpuqs")); |
182 | rdp->passed_quiesce = 1; | 199 | rdp->passed_quiesce = 1; |
183 | } | 200 | } |
184 | 201 | ||
@@ -187,7 +204,7 @@ void rcu_bh_qs(int cpu) | |||
187 | struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu); | 204 | struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu); |
188 | 205 | ||
189 | if (rdp->passed_quiesce == 0) | 206 | if (rdp->passed_quiesce == 0) |
190 | trace_rcu_grace_period("rcu_bh", rdp->gpnum, "cpuqs"); | 207 | trace_rcu_grace_period(TPS("rcu_bh"), rdp->gpnum, TPS("cpuqs")); |
191 | rdp->passed_quiesce = 1; | 208 | rdp->passed_quiesce = 1; |
192 | } | 209 | } |
193 | 210 | ||
@@ -198,16 +215,20 @@ void rcu_bh_qs(int cpu) | |||
198 | */ | 215 | */ |
199 | void rcu_note_context_switch(int cpu) | 216 | void rcu_note_context_switch(int cpu) |
200 | { | 217 | { |
201 | trace_rcu_utilization("Start context switch"); | 218 | trace_rcu_utilization(TPS("Start context switch")); |
202 | rcu_sched_qs(cpu); | 219 | rcu_sched_qs(cpu); |
203 | rcu_preempt_note_context_switch(cpu); | 220 | rcu_preempt_note_context_switch(cpu); |
204 | trace_rcu_utilization("End context switch"); | 221 | trace_rcu_utilization(TPS("End context switch")); |
205 | } | 222 | } |
206 | EXPORT_SYMBOL_GPL(rcu_note_context_switch); | 223 | EXPORT_SYMBOL_GPL(rcu_note_context_switch); |
207 | 224 | ||
208 | DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = { | 225 | DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = { |
209 | .dynticks_nesting = DYNTICK_TASK_EXIT_IDLE, | 226 | .dynticks_nesting = DYNTICK_TASK_EXIT_IDLE, |
210 | .dynticks = ATOMIC_INIT(1), | 227 | .dynticks = ATOMIC_INIT(1), |
228 | #ifdef CONFIG_NO_HZ_FULL_SYSIDLE | ||
229 | .dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE, | ||
230 | .dynticks_idle = ATOMIC_INIT(1), | ||
231 | #endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */ | ||
211 | }; | 232 | }; |
212 | 233 | ||
213 | static long blimit = 10; /* Maximum callbacks per rcu_do_batch. */ | 234 | static long blimit = 10; /* Maximum callbacks per rcu_do_batch. */ |
@@ -226,7 +247,10 @@ module_param(jiffies_till_next_fqs, ulong, 0644); | |||
226 | 247 | ||
227 | static void rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp, | 248 | static void rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp, |
228 | struct rcu_data *rdp); | 249 | struct rcu_data *rdp); |
229 | static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *)); | 250 | static void force_qs_rnp(struct rcu_state *rsp, |
251 | int (*f)(struct rcu_data *rsp, bool *isidle, | ||
252 | unsigned long *maxj), | ||
253 | bool *isidle, unsigned long *maxj); | ||
230 | static void force_quiescent_state(struct rcu_state *rsp); | 254 | static void force_quiescent_state(struct rcu_state *rsp); |
231 | static int rcu_pending(int cpu); | 255 | static int rcu_pending(int cpu); |
232 | 256 | ||
@@ -345,11 +369,11 @@ static struct rcu_node *rcu_get_root(struct rcu_state *rsp) | |||
345 | static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval, | 369 | static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval, |
346 | bool user) | 370 | bool user) |
347 | { | 371 | { |
348 | trace_rcu_dyntick("Start", oldval, rdtp->dynticks_nesting); | 372 | trace_rcu_dyntick(TPS("Start"), oldval, rdtp->dynticks_nesting); |
349 | if (!user && !is_idle_task(current)) { | 373 | if (!user && !is_idle_task(current)) { |
350 | struct task_struct *idle = idle_task(smp_processor_id()); | 374 | struct task_struct *idle = idle_task(smp_processor_id()); |
351 | 375 | ||
352 | trace_rcu_dyntick("Error on entry: not idle task", oldval, 0); | 376 | trace_rcu_dyntick(TPS("Error on entry: not idle task"), oldval, 0); |
353 | ftrace_dump(DUMP_ORIG); | 377 | ftrace_dump(DUMP_ORIG); |
354 | WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s", | 378 | WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s", |
355 | current->pid, current->comm, | 379 | current->pid, current->comm, |
@@ -411,6 +435,7 @@ void rcu_idle_enter(void) | |||
411 | 435 | ||
412 | local_irq_save(flags); | 436 | local_irq_save(flags); |
413 | rcu_eqs_enter(false); | 437 | rcu_eqs_enter(false); |
438 | rcu_sysidle_enter(&__get_cpu_var(rcu_dynticks), 0); | ||
414 | local_irq_restore(flags); | 439 | local_irq_restore(flags); |
415 | } | 440 | } |
416 | EXPORT_SYMBOL_GPL(rcu_idle_enter); | 441 | EXPORT_SYMBOL_GPL(rcu_idle_enter); |
@@ -428,27 +453,6 @@ void rcu_user_enter(void) | |||
428 | { | 453 | { |
429 | rcu_eqs_enter(1); | 454 | rcu_eqs_enter(1); |
430 | } | 455 | } |
431 | |||
432 | /** | ||
433 | * rcu_user_enter_after_irq - inform RCU that we are going to resume userspace | ||
434 | * after the current irq returns. | ||
435 | * | ||
436 | * This is similar to rcu_user_enter() but in the context of a non-nesting | ||
437 | * irq. After this call, RCU enters into idle mode when the interrupt | ||
438 | * returns. | ||
439 | */ | ||
440 | void rcu_user_enter_after_irq(void) | ||
441 | { | ||
442 | unsigned long flags; | ||
443 | struct rcu_dynticks *rdtp; | ||
444 | |||
445 | local_irq_save(flags); | ||
446 | rdtp = &__get_cpu_var(rcu_dynticks); | ||
447 | /* Ensure this irq is interrupting a non-idle RCU state. */ | ||
448 | WARN_ON_ONCE(!(rdtp->dynticks_nesting & DYNTICK_TASK_MASK)); | ||
449 | rdtp->dynticks_nesting = 1; | ||
450 | local_irq_restore(flags); | ||
451 | } | ||
452 | #endif /* CONFIG_RCU_USER_QS */ | 456 | #endif /* CONFIG_RCU_USER_QS */ |
453 | 457 | ||
454 | /** | 458 | /** |
@@ -479,9 +483,10 @@ void rcu_irq_exit(void) | |||
479 | rdtp->dynticks_nesting--; | 483 | rdtp->dynticks_nesting--; |
480 | WARN_ON_ONCE(rdtp->dynticks_nesting < 0); | 484 | WARN_ON_ONCE(rdtp->dynticks_nesting < 0); |
481 | if (rdtp->dynticks_nesting) | 485 | if (rdtp->dynticks_nesting) |
482 | trace_rcu_dyntick("--=", oldval, rdtp->dynticks_nesting); | 486 | trace_rcu_dyntick(TPS("--="), oldval, rdtp->dynticks_nesting); |
483 | else | 487 | else |
484 | rcu_eqs_enter_common(rdtp, oldval, true); | 488 | rcu_eqs_enter_common(rdtp, oldval, true); |
489 | rcu_sysidle_enter(rdtp, 1); | ||
485 | local_irq_restore(flags); | 490 | local_irq_restore(flags); |
486 | } | 491 | } |
487 | 492 | ||
@@ -501,11 +506,11 @@ static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval, | |||
501 | smp_mb__after_atomic_inc(); /* See above. */ | 506 | smp_mb__after_atomic_inc(); /* See above. */ |
502 | WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1)); | 507 | WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1)); |
503 | rcu_cleanup_after_idle(smp_processor_id()); | 508 | rcu_cleanup_after_idle(smp_processor_id()); |
504 | trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting); | 509 | trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting); |
505 | if (!user && !is_idle_task(current)) { | 510 | if (!user && !is_idle_task(current)) { |
506 | struct task_struct *idle = idle_task(smp_processor_id()); | 511 | struct task_struct *idle = idle_task(smp_processor_id()); |
507 | 512 | ||
508 | trace_rcu_dyntick("Error on exit: not idle task", | 513 | trace_rcu_dyntick(TPS("Error on exit: not idle task"), |
509 | oldval, rdtp->dynticks_nesting); | 514 | oldval, rdtp->dynticks_nesting); |
510 | ftrace_dump(DUMP_ORIG); | 515 | ftrace_dump(DUMP_ORIG); |
511 | WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s", | 516 | WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s", |
@@ -550,6 +555,7 @@ void rcu_idle_exit(void) | |||
550 | 555 | ||
551 | local_irq_save(flags); | 556 | local_irq_save(flags); |
552 | rcu_eqs_exit(false); | 557 | rcu_eqs_exit(false); |
558 | rcu_sysidle_exit(&__get_cpu_var(rcu_dynticks), 0); | ||
553 | local_irq_restore(flags); | 559 | local_irq_restore(flags); |
554 | } | 560 | } |
555 | EXPORT_SYMBOL_GPL(rcu_idle_exit); | 561 | EXPORT_SYMBOL_GPL(rcu_idle_exit); |
@@ -565,28 +571,6 @@ void rcu_user_exit(void) | |||
565 | { | 571 | { |
566 | rcu_eqs_exit(1); | 572 | rcu_eqs_exit(1); |
567 | } | 573 | } |
568 | |||
569 | /** | ||
570 | * rcu_user_exit_after_irq - inform RCU that we won't resume to userspace | ||
571 | * idle mode after the current non-nesting irq returns. | ||
572 | * | ||
573 | * This is similar to rcu_user_exit() but in the context of an irq. | ||
574 | * This is called when the irq has interrupted a userspace RCU idle mode | ||
575 | * context. When the current non-nesting interrupt returns after this call, | ||
576 | * the CPU won't restore the RCU idle mode. | ||
577 | */ | ||
578 | void rcu_user_exit_after_irq(void) | ||
579 | { | ||
580 | unsigned long flags; | ||
581 | struct rcu_dynticks *rdtp; | ||
582 | |||
583 | local_irq_save(flags); | ||
584 | rdtp = &__get_cpu_var(rcu_dynticks); | ||
585 | /* Ensure we are interrupting an RCU idle mode. */ | ||
586 | WARN_ON_ONCE(rdtp->dynticks_nesting & DYNTICK_TASK_NEST_MASK); | ||
587 | rdtp->dynticks_nesting += DYNTICK_TASK_EXIT_IDLE; | ||
588 | local_irq_restore(flags); | ||
589 | } | ||
590 | #endif /* CONFIG_RCU_USER_QS */ | 574 | #endif /* CONFIG_RCU_USER_QS */ |
591 | 575 | ||
592 | /** | 576 | /** |
@@ -620,9 +604,10 @@ void rcu_irq_enter(void) | |||
620 | rdtp->dynticks_nesting++; | 604 | rdtp->dynticks_nesting++; |
621 | WARN_ON_ONCE(rdtp->dynticks_nesting == 0); | 605 | WARN_ON_ONCE(rdtp->dynticks_nesting == 0); |
622 | if (oldval) | 606 | if (oldval) |
623 | trace_rcu_dyntick("++=", oldval, rdtp->dynticks_nesting); | 607 | trace_rcu_dyntick(TPS("++="), oldval, rdtp->dynticks_nesting); |
624 | else | 608 | else |
625 | rcu_eqs_exit_common(rdtp, oldval, true); | 609 | rcu_eqs_exit_common(rdtp, oldval, true); |
610 | rcu_sysidle_exit(rdtp, 1); | ||
626 | local_irq_restore(flags); | 611 | local_irq_restore(flags); |
627 | } | 612 | } |
628 | 613 | ||
@@ -746,9 +731,11 @@ static int rcu_is_cpu_rrupt_from_idle(void) | |||
746 | * credit them with an implicit quiescent state. Return 1 if this CPU | 731 | * credit them with an implicit quiescent state. Return 1 if this CPU |
747 | * is in dynticks idle mode, which is an extended quiescent state. | 732 | * is in dynticks idle mode, which is an extended quiescent state. |
748 | */ | 733 | */ |
749 | static int dyntick_save_progress_counter(struct rcu_data *rdp) | 734 | static int dyntick_save_progress_counter(struct rcu_data *rdp, |
735 | bool *isidle, unsigned long *maxj) | ||
750 | { | 736 | { |
751 | rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks); | 737 | rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks); |
738 | rcu_sysidle_check_cpu(rdp, isidle, maxj); | ||
752 | return (rdp->dynticks_snap & 0x1) == 0; | 739 | return (rdp->dynticks_snap & 0x1) == 0; |
753 | } | 740 | } |
754 | 741 | ||
@@ -758,7 +745,8 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp) | |||
758 | * idle state since the last call to dyntick_save_progress_counter() | 745 | * idle state since the last call to dyntick_save_progress_counter() |
759 | * for this same CPU, or by virtue of having been offline. | 746 | * for this same CPU, or by virtue of having been offline. |
760 | */ | 747 | */ |
761 | static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) | 748 | static int rcu_implicit_dynticks_qs(struct rcu_data *rdp, |
749 | bool *isidle, unsigned long *maxj) | ||
762 | { | 750 | { |
763 | unsigned int curr; | 751 | unsigned int curr; |
764 | unsigned int snap; | 752 | unsigned int snap; |
@@ -775,7 +763,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) | |||
775 | * of the current RCU grace period. | 763 | * of the current RCU grace period. |
776 | */ | 764 | */ |
777 | if ((curr & 0x1) == 0 || UINT_CMP_GE(curr, snap + 2)) { | 765 | if ((curr & 0x1) == 0 || UINT_CMP_GE(curr, snap + 2)) { |
778 | trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, "dti"); | 766 | trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti")); |
779 | rdp->dynticks_fqs++; | 767 | rdp->dynticks_fqs++; |
780 | return 1; | 768 | return 1; |
781 | } | 769 | } |
@@ -795,7 +783,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) | |||
795 | return 0; /* Grace period is not old enough. */ | 783 | return 0; /* Grace period is not old enough. */ |
796 | barrier(); | 784 | barrier(); |
797 | if (cpu_is_offline(rdp->cpu)) { | 785 | if (cpu_is_offline(rdp->cpu)) { |
798 | trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, "ofl"); | 786 | trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("ofl")); |
799 | rdp->offline_fqs++; | 787 | rdp->offline_fqs++; |
800 | return 1; | 788 | return 1; |
801 | } | 789 | } |
@@ -1032,7 +1020,7 @@ static unsigned long rcu_cbs_completed(struct rcu_state *rsp, | |||
1032 | * rcu_nocb_wait_gp(). | 1020 | * rcu_nocb_wait_gp(). |
1033 | */ | 1021 | */ |
1034 | static void trace_rcu_future_gp(struct rcu_node *rnp, struct rcu_data *rdp, | 1022 | static void trace_rcu_future_gp(struct rcu_node *rnp, struct rcu_data *rdp, |
1035 | unsigned long c, char *s) | 1023 | unsigned long c, const char *s) |
1036 | { | 1024 | { |
1037 | trace_rcu_future_grace_period(rdp->rsp->name, rnp->gpnum, | 1025 | trace_rcu_future_grace_period(rdp->rsp->name, rnp->gpnum, |
1038 | rnp->completed, c, rnp->level, | 1026 | rnp->completed, c, rnp->level, |
@@ -1058,9 +1046,9 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp) | |||
1058 | * grace period is already marked as needed, return to the caller. | 1046 | * grace period is already marked as needed, return to the caller. |
1059 | */ | 1047 | */ |
1060 | c = rcu_cbs_completed(rdp->rsp, rnp); | 1048 | c = rcu_cbs_completed(rdp->rsp, rnp); |
1061 | trace_rcu_future_gp(rnp, rdp, c, "Startleaf"); | 1049 | trace_rcu_future_gp(rnp, rdp, c, TPS("Startleaf")); |
1062 | if (rnp->need_future_gp[c & 0x1]) { | 1050 | if (rnp->need_future_gp[c & 0x1]) { |
1063 | trace_rcu_future_gp(rnp, rdp, c, "Prestartleaf"); | 1051 | trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartleaf")); |
1064 | return c; | 1052 | return c; |
1065 | } | 1053 | } |
1066 | 1054 | ||
@@ -1074,7 +1062,7 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp) | |||
1074 | if (rnp->gpnum != rnp->completed || | 1062 | if (rnp->gpnum != rnp->completed || |
1075 | ACCESS_ONCE(rnp->gpnum) != ACCESS_ONCE(rnp->completed)) { | 1063 | ACCESS_ONCE(rnp->gpnum) != ACCESS_ONCE(rnp->completed)) { |
1076 | rnp->need_future_gp[c & 0x1]++; | 1064 | rnp->need_future_gp[c & 0x1]++; |
1077 | trace_rcu_future_gp(rnp, rdp, c, "Startedleaf"); | 1065 | trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleaf")); |
1078 | return c; | 1066 | return c; |
1079 | } | 1067 | } |
1080 | 1068 | ||
@@ -1102,7 +1090,7 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp) | |||
1102 | * recorded, trace and leave. | 1090 | * recorded, trace and leave. |
1103 | */ | 1091 | */ |
1104 | if (rnp_root->need_future_gp[c & 0x1]) { | 1092 | if (rnp_root->need_future_gp[c & 0x1]) { |
1105 | trace_rcu_future_gp(rnp, rdp, c, "Prestartedroot"); | 1093 | trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartedroot")); |
1106 | goto unlock_out; | 1094 | goto unlock_out; |
1107 | } | 1095 | } |
1108 | 1096 | ||
@@ -1111,9 +1099,9 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp) | |||
1111 | 1099 | ||
1112 | /* If a grace period is not already in progress, start one. */ | 1100 | /* If a grace period is not already in progress, start one. */ |
1113 | if (rnp_root->gpnum != rnp_root->completed) { | 1101 | if (rnp_root->gpnum != rnp_root->completed) { |
1114 | trace_rcu_future_gp(rnp, rdp, c, "Startedleafroot"); | 1102 | trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleafroot")); |
1115 | } else { | 1103 | } else { |
1116 | trace_rcu_future_gp(rnp, rdp, c, "Startedroot"); | 1104 | trace_rcu_future_gp(rnp, rdp, c, TPS("Startedroot")); |
1117 | rcu_start_gp_advanced(rdp->rsp, rnp_root, rdp); | 1105 | rcu_start_gp_advanced(rdp->rsp, rnp_root, rdp); |
1118 | } | 1106 | } |
1119 | unlock_out: | 1107 | unlock_out: |
@@ -1137,7 +1125,8 @@ static int rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp) | |||
1137 | rcu_nocb_gp_cleanup(rsp, rnp); | 1125 | rcu_nocb_gp_cleanup(rsp, rnp); |
1138 | rnp->need_future_gp[c & 0x1] = 0; | 1126 | rnp->need_future_gp[c & 0x1] = 0; |
1139 | needmore = rnp->need_future_gp[(c + 1) & 0x1]; | 1127 | needmore = rnp->need_future_gp[(c + 1) & 0x1]; |
1140 | trace_rcu_future_gp(rnp, rdp, c, needmore ? "CleanupMore" : "Cleanup"); | 1128 | trace_rcu_future_gp(rnp, rdp, c, |
1129 | needmore ? TPS("CleanupMore") : TPS("Cleanup")); | ||
1141 | return needmore; | 1130 | return needmore; |
1142 | } | 1131 | } |
1143 | 1132 | ||
@@ -1205,9 +1194,9 @@ static void rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp, | |||
1205 | 1194 | ||
1206 | /* Trace depending on how much we were able to accelerate. */ | 1195 | /* Trace depending on how much we were able to accelerate. */ |
1207 | if (!*rdp->nxttail[RCU_WAIT_TAIL]) | 1196 | if (!*rdp->nxttail[RCU_WAIT_TAIL]) |
1208 | trace_rcu_grace_period(rsp->name, rdp->gpnum, "AccWaitCB"); | 1197 | trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccWaitCB")); |
1209 | else | 1198 | else |
1210 | trace_rcu_grace_period(rsp->name, rdp->gpnum, "AccReadyCB"); | 1199 | trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccReadyCB")); |
1211 | } | 1200 | } |
1212 | 1201 | ||
1213 | /* | 1202 | /* |
@@ -1273,7 +1262,7 @@ static void __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp, struc | |||
1273 | 1262 | ||
1274 | /* Remember that we saw this grace-period completion. */ | 1263 | /* Remember that we saw this grace-period completion. */ |
1275 | rdp->completed = rnp->completed; | 1264 | rdp->completed = rnp->completed; |
1276 | trace_rcu_grace_period(rsp->name, rdp->gpnum, "cpuend"); | 1265 | trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuend")); |
1277 | } | 1266 | } |
1278 | 1267 | ||
1279 | if (rdp->gpnum != rnp->gpnum) { | 1268 | if (rdp->gpnum != rnp->gpnum) { |
@@ -1283,7 +1272,7 @@ static void __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp, struc | |||
1283 | * go looking for one. | 1272 | * go looking for one. |
1284 | */ | 1273 | */ |
1285 | rdp->gpnum = rnp->gpnum; | 1274 | rdp->gpnum = rnp->gpnum; |
1286 | trace_rcu_grace_period(rsp->name, rdp->gpnum, "cpustart"); | 1275 | trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpustart")); |
1287 | rdp->passed_quiesce = 0; | 1276 | rdp->passed_quiesce = 0; |
1288 | rdp->qs_pending = !!(rnp->qsmask & rdp->grpmask); | 1277 | rdp->qs_pending = !!(rnp->qsmask & rdp->grpmask); |
1289 | zero_cpu_stall_ticks(rdp); | 1278 | zero_cpu_stall_ticks(rdp); |
@@ -1315,6 +1304,7 @@ static int rcu_gp_init(struct rcu_state *rsp) | |||
1315 | struct rcu_data *rdp; | 1304 | struct rcu_data *rdp; |
1316 | struct rcu_node *rnp = rcu_get_root(rsp); | 1305 | struct rcu_node *rnp = rcu_get_root(rsp); |
1317 | 1306 | ||
1307 | rcu_bind_gp_kthread(); | ||
1318 | raw_spin_lock_irq(&rnp->lock); | 1308 | raw_spin_lock_irq(&rnp->lock); |
1319 | rsp->gp_flags = 0; /* Clear all flags: New grace period. */ | 1309 | rsp->gp_flags = 0; /* Clear all flags: New grace period. */ |
1320 | 1310 | ||
@@ -1326,7 +1316,7 @@ static int rcu_gp_init(struct rcu_state *rsp) | |||
1326 | 1316 | ||
1327 | /* Advance to a new grace period and initialize state. */ | 1317 | /* Advance to a new grace period and initialize state. */ |
1328 | rsp->gpnum++; | 1318 | rsp->gpnum++; |
1329 | trace_rcu_grace_period(rsp->name, rsp->gpnum, "start"); | 1319 | trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start")); |
1330 | record_gp_stall_check_time(rsp); | 1320 | record_gp_stall_check_time(rsp); |
1331 | raw_spin_unlock_irq(&rnp->lock); | 1321 | raw_spin_unlock_irq(&rnp->lock); |
1332 | 1322 | ||
@@ -1379,16 +1369,25 @@ static int rcu_gp_init(struct rcu_state *rsp) | |||
1379 | int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in) | 1369 | int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in) |
1380 | { | 1370 | { |
1381 | int fqs_state = fqs_state_in; | 1371 | int fqs_state = fqs_state_in; |
1372 | bool isidle = false; | ||
1373 | unsigned long maxj; | ||
1382 | struct rcu_node *rnp = rcu_get_root(rsp); | 1374 | struct rcu_node *rnp = rcu_get_root(rsp); |
1383 | 1375 | ||
1384 | rsp->n_force_qs++; | 1376 | rsp->n_force_qs++; |
1385 | if (fqs_state == RCU_SAVE_DYNTICK) { | 1377 | if (fqs_state == RCU_SAVE_DYNTICK) { |
1386 | /* Collect dyntick-idle snapshots. */ | 1378 | /* Collect dyntick-idle snapshots. */ |
1387 | force_qs_rnp(rsp, dyntick_save_progress_counter); | 1379 | if (is_sysidle_rcu_state(rsp)) { |
1380 | isidle = 1; | ||
1381 | maxj = jiffies - ULONG_MAX / 4; | ||
1382 | } | ||
1383 | force_qs_rnp(rsp, dyntick_save_progress_counter, | ||
1384 | &isidle, &maxj); | ||
1385 | rcu_sysidle_report_gp(rsp, isidle, maxj); | ||
1388 | fqs_state = RCU_FORCE_QS; | 1386 | fqs_state = RCU_FORCE_QS; |
1389 | } else { | 1387 | } else { |
1390 | /* Handle dyntick-idle and offline CPUs. */ | 1388 | /* Handle dyntick-idle and offline CPUs. */ |
1391 | force_qs_rnp(rsp, rcu_implicit_dynticks_qs); | 1389 | isidle = 0; |
1390 | force_qs_rnp(rsp, rcu_implicit_dynticks_qs, &isidle, &maxj); | ||
1392 | } | 1391 | } |
1393 | /* Clear flag to prevent immediate re-entry. */ | 1392 | /* Clear flag to prevent immediate re-entry. */ |
1394 | if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) { | 1393 | if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) { |
@@ -1448,7 +1447,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp) | |||
1448 | rcu_nocb_gp_set(rnp, nocb); | 1447 | rcu_nocb_gp_set(rnp, nocb); |
1449 | 1448 | ||
1450 | rsp->completed = rsp->gpnum; /* Declare grace period done. */ | 1449 | rsp->completed = rsp->gpnum; /* Declare grace period done. */ |
1451 | trace_rcu_grace_period(rsp->name, rsp->completed, "end"); | 1450 | trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end")); |
1452 | rsp->fqs_state = RCU_GP_IDLE; | 1451 | rsp->fqs_state = RCU_GP_IDLE; |
1453 | rdp = this_cpu_ptr(rsp->rda); | 1452 | rdp = this_cpu_ptr(rsp->rda); |
1454 | rcu_advance_cbs(rsp, rnp, rdp); /* Reduce false positives below. */ | 1453 | rcu_advance_cbs(rsp, rnp, rdp); /* Reduce false positives below. */ |
@@ -1558,10 +1557,12 @@ rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp, | |||
1558 | 1557 | ||
1559 | /* | 1558 | /* |
1560 | * We can't do wakeups while holding the rnp->lock, as that | 1559 | * We can't do wakeups while holding the rnp->lock, as that |
1561 | * could cause possible deadlocks with the rq->lock. Deter | 1560 | * could cause possible deadlocks with the rq->lock. Defer |
1562 | * the wakeup to interrupt context. | 1561 | * the wakeup to interrupt context. And don't bother waking |
1562 | * up the running kthread. | ||
1563 | */ | 1563 | */ |
1564 | irq_work_queue(&rsp->wakeup_work); | 1564 | if (current != rsp->gp_kthread) |
1565 | irq_work_queue(&rsp->wakeup_work); | ||
1565 | } | 1566 | } |
1566 | 1567 | ||
1567 | /* | 1568 | /* |
@@ -1857,7 +1858,7 @@ static void rcu_cleanup_dying_cpu(struct rcu_state *rsp) | |||
1857 | RCU_TRACE(mask = rdp->grpmask); | 1858 | RCU_TRACE(mask = rdp->grpmask); |
1858 | trace_rcu_grace_period(rsp->name, | 1859 | trace_rcu_grace_period(rsp->name, |
1859 | rnp->gpnum + 1 - !!(rnp->qsmask & mask), | 1860 | rnp->gpnum + 1 - !!(rnp->qsmask & mask), |
1860 | "cpuofl"); | 1861 | TPS("cpuofl")); |
1861 | } | 1862 | } |
1862 | 1863 | ||
1863 | /* | 1864 | /* |
@@ -2044,7 +2045,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp) | |||
2044 | */ | 2045 | */ |
2045 | void rcu_check_callbacks(int cpu, int user) | 2046 | void rcu_check_callbacks(int cpu, int user) |
2046 | { | 2047 | { |
2047 | trace_rcu_utilization("Start scheduler-tick"); | 2048 | trace_rcu_utilization(TPS("Start scheduler-tick")); |
2048 | increment_cpu_stall_ticks(); | 2049 | increment_cpu_stall_ticks(); |
2049 | if (user || rcu_is_cpu_rrupt_from_idle()) { | 2050 | if (user || rcu_is_cpu_rrupt_from_idle()) { |
2050 | 2051 | ||
@@ -2077,7 +2078,7 @@ void rcu_check_callbacks(int cpu, int user) | |||
2077 | rcu_preempt_check_callbacks(cpu); | 2078 | rcu_preempt_check_callbacks(cpu); |
2078 | if (rcu_pending(cpu)) | 2079 | if (rcu_pending(cpu)) |
2079 | invoke_rcu_core(); | 2080 | invoke_rcu_core(); |
2080 | trace_rcu_utilization("End scheduler-tick"); | 2081 | trace_rcu_utilization(TPS("End scheduler-tick")); |
2081 | } | 2082 | } |
2082 | 2083 | ||
2083 | /* | 2084 | /* |
@@ -2087,7 +2088,10 @@ void rcu_check_callbacks(int cpu, int user) | |||
2087 | * | 2088 | * |
2088 | * The caller must have suppressed start of new grace periods. | 2089 | * The caller must have suppressed start of new grace periods. |
2089 | */ | 2090 | */ |
2090 | static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *)) | 2091 | static void force_qs_rnp(struct rcu_state *rsp, |
2092 | int (*f)(struct rcu_data *rsp, bool *isidle, | ||
2093 | unsigned long *maxj), | ||
2094 | bool *isidle, unsigned long *maxj) | ||
2091 | { | 2095 | { |
2092 | unsigned long bit; | 2096 | unsigned long bit; |
2093 | int cpu; | 2097 | int cpu; |
@@ -2110,9 +2114,12 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *)) | |||
2110 | cpu = rnp->grplo; | 2114 | cpu = rnp->grplo; |
2111 | bit = 1; | 2115 | bit = 1; |
2112 | for (; cpu <= rnp->grphi; cpu++, bit <<= 1) { | 2116 | for (; cpu <= rnp->grphi; cpu++, bit <<= 1) { |
2113 | if ((rnp->qsmask & bit) != 0 && | 2117 | if ((rnp->qsmask & bit) != 0) { |
2114 | f(per_cpu_ptr(rsp->rda, cpu))) | 2118 | if ((rnp->qsmaskinit & bit) != 0) |
2115 | mask |= bit; | 2119 | *isidle = 0; |
2120 | if (f(per_cpu_ptr(rsp->rda, cpu), isidle, maxj)) | ||
2121 | mask |= bit; | ||
2122 | } | ||
2116 | } | 2123 | } |
2117 | if (mask != 0) { | 2124 | if (mask != 0) { |
2118 | 2125 | ||
@@ -2208,10 +2215,10 @@ static void rcu_process_callbacks(struct softirq_action *unused) | |||
2208 | 2215 | ||
2209 | if (cpu_is_offline(smp_processor_id())) | 2216 | if (cpu_is_offline(smp_processor_id())) |
2210 | return; | 2217 | return; |
2211 | trace_rcu_utilization("Start RCU core"); | 2218 | trace_rcu_utilization(TPS("Start RCU core")); |
2212 | for_each_rcu_flavor(rsp) | 2219 | for_each_rcu_flavor(rsp) |
2213 | __rcu_process_callbacks(rsp); | 2220 | __rcu_process_callbacks(rsp); |
2214 | trace_rcu_utilization("End RCU core"); | 2221 | trace_rcu_utilization(TPS("End RCU core")); |
2215 | } | 2222 | } |
2216 | 2223 | ||
2217 | /* | 2224 | /* |
@@ -2287,6 +2294,13 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp, | |||
2287 | } | 2294 | } |
2288 | 2295 | ||
2289 | /* | 2296 | /* |
2297 | * RCU callback function to leak a callback. | ||
2298 | */ | ||
2299 | static void rcu_leak_callback(struct rcu_head *rhp) | ||
2300 | { | ||
2301 | } | ||
2302 | |||
2303 | /* | ||
2290 | * Helper function for call_rcu() and friends. The cpu argument will | 2304 | * Helper function for call_rcu() and friends. The cpu argument will |
2291 | * normally be -1, indicating "currently running CPU". It may specify | 2305 | * normally be -1, indicating "currently running CPU". It may specify |
2292 | * a CPU only if that CPU is a no-CBs CPU. Currently, only _rcu_barrier() | 2306 | * a CPU only if that CPU is a no-CBs CPU. Currently, only _rcu_barrier() |
@@ -2300,7 +2314,12 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu), | |||
2300 | struct rcu_data *rdp; | 2314 | struct rcu_data *rdp; |
2301 | 2315 | ||
2302 | WARN_ON_ONCE((unsigned long)head & 0x3); /* Misaligned rcu_head! */ | 2316 | WARN_ON_ONCE((unsigned long)head & 0x3); /* Misaligned rcu_head! */ |
2303 | debug_rcu_head_queue(head); | 2317 | if (debug_rcu_head_queue(head)) { |
2318 | /* Probable double call_rcu(), so leak the callback. */ | ||
2319 | ACCESS_ONCE(head->func) = rcu_leak_callback; | ||
2320 | WARN_ONCE(1, "__call_rcu(): Leaked duplicate callback\n"); | ||
2321 | return; | ||
2322 | } | ||
2304 | head->func = func; | 2323 | head->func = func; |
2305 | head->next = NULL; | 2324 | head->next = NULL; |
2306 | 2325 | ||
@@ -2720,7 +2739,7 @@ static int rcu_cpu_has_callbacks(int cpu, bool *all_lazy) | |||
2720 | * Helper function for _rcu_barrier() tracing. If tracing is disabled, | 2739 | * Helper function for _rcu_barrier() tracing. If tracing is disabled, |
2721 | * the compiler is expected to optimize this away. | 2740 | * the compiler is expected to optimize this away. |
2722 | */ | 2741 | */ |
2723 | static void _rcu_barrier_trace(struct rcu_state *rsp, char *s, | 2742 | static void _rcu_barrier_trace(struct rcu_state *rsp, const char *s, |
2724 | int cpu, unsigned long done) | 2743 | int cpu, unsigned long done) |
2725 | { | 2744 | { |
2726 | trace_rcu_barrier(rsp->name, s, cpu, | 2745 | trace_rcu_barrier(rsp->name, s, cpu, |
@@ -2785,9 +2804,20 @@ static void _rcu_barrier(struct rcu_state *rsp) | |||
2785 | * transition. The "if" expression below therefore rounds the old | 2804 | * transition. The "if" expression below therefore rounds the old |
2786 | * value up to the next even number and adds two before comparing. | 2805 | * value up to the next even number and adds two before comparing. |
2787 | */ | 2806 | */ |
2788 | snap_done = ACCESS_ONCE(rsp->n_barrier_done); | 2807 | snap_done = rsp->n_barrier_done; |
2789 | _rcu_barrier_trace(rsp, "Check", -1, snap_done); | 2808 | _rcu_barrier_trace(rsp, "Check", -1, snap_done); |
2790 | if (ULONG_CMP_GE(snap_done, ((snap + 1) & ~0x1) + 2)) { | 2809 | |
2810 | /* | ||
2811 | * If the value in snap is odd, we needed to wait for the current | ||
2812 | * rcu_barrier() to complete, then wait for the next one, in other | ||
2813 | * words, we need the value of snap_done to be three larger than | ||
2814 | * the value of snap. On the other hand, if the value in snap is | ||
2815 | * even, we only had to wait for the next rcu_barrier() to complete, | ||
2816 | * in other words, we need the value of snap_done to be only two | ||
2817 | * greater than the value of snap. The "(snap + 3) & ~0x1" computes | ||
2818 | * this for us (thank you, Linus!). | ||
2819 | */ | ||
2820 | if (ULONG_CMP_GE(snap_done, (snap + 3) & ~0x1)) { | ||
2791 | _rcu_barrier_trace(rsp, "EarlyExit", -1, snap_done); | 2821 | _rcu_barrier_trace(rsp, "EarlyExit", -1, snap_done); |
2792 | smp_mb(); /* caller's subsequent code after above check. */ | 2822 | smp_mb(); /* caller's subsequent code after above check. */ |
2793 | mutex_unlock(&rsp->barrier_mutex); | 2823 | mutex_unlock(&rsp->barrier_mutex); |
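The "(snap + 3) & ~0x1" rounding described in the comment above can be checked directly: an even snapshot only needs rsp->n_barrier_done to advance by two, while an odd snapshot (a barrier already in flight) needs it to advance by three. A tiny standalone check, not kernel code:

	#include <stdio.h>

	int main(void)
	{
		unsigned long snap;

		/* Even snapshot: the next rcu_barrier() completion suffices,
		 * so snap + 2 is enough.  Odd snapshot: a barrier is already
		 * in flight, so the one after it must also finish, i.e.
		 * snap + 3.  "(snap + 3) & ~0x1" yields exactly those values. */
		for (snap = 0; snap < 6; snap++)
			printf("snap=%lu  needs snap_done >= %lu\n",
			       snap, (snap + 3) & ~0x1UL);
		return 0;
	}

Running this prints 2, 4, 4, 6, 6, 8 for snapshots 0 through 5, matching the even/odd cases the comment derives.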
@@ -2930,6 +2960,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible) | |||
2930 | rdp->blimit = blimit; | 2960 | rdp->blimit = blimit; |
2931 | init_callback_list(rdp); /* Re-enable callbacks on this CPU. */ | 2961 | init_callback_list(rdp); /* Re-enable callbacks on this CPU. */ |
2932 | rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE; | 2962 | rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE; |
2963 | rcu_sysidle_init_percpu_data(rdp->dynticks); | ||
2933 | atomic_set(&rdp->dynticks->dynticks, | 2964 | atomic_set(&rdp->dynticks->dynticks, |
2934 | (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1); | 2965 | (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1); |
2935 | raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ | 2966 | raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ |
@@ -2952,7 +2983,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible) | |||
2952 | rdp->completed = rnp->completed; | 2983 | rdp->completed = rnp->completed; |
2953 | rdp->passed_quiesce = 0; | 2984 | rdp->passed_quiesce = 0; |
2954 | rdp->qs_pending = 0; | 2985 | rdp->qs_pending = 0; |
2955 | trace_rcu_grace_period(rsp->name, rdp->gpnum, "cpuonl"); | 2986 | trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl")); |
2956 | } | 2987 | } |
2957 | raw_spin_unlock(&rnp->lock); /* irqs already disabled. */ | 2988 | raw_spin_unlock(&rnp->lock); /* irqs already disabled. */ |
2958 | rnp = rnp->parent; | 2989 | rnp = rnp->parent; |
@@ -2982,7 +3013,7 @@ static int rcu_cpu_notify(struct notifier_block *self, | |||
2982 | struct rcu_node *rnp = rdp->mynode; | 3013 | struct rcu_node *rnp = rdp->mynode; |
2983 | struct rcu_state *rsp; | 3014 | struct rcu_state *rsp; |
2984 | 3015 | ||
2985 | trace_rcu_utilization("Start CPU hotplug"); | 3016 | trace_rcu_utilization(TPS("Start CPU hotplug")); |
2986 | switch (action) { | 3017 | switch (action) { |
2987 | case CPU_UP_PREPARE: | 3018 | case CPU_UP_PREPARE: |
2988 | case CPU_UP_PREPARE_FROZEN: | 3019 | case CPU_UP_PREPARE_FROZEN: |
@@ -3011,7 +3042,26 @@ static int rcu_cpu_notify(struct notifier_block *self, | |||
3011 | default: | 3042 | default: |
3012 | break; | 3043 | break; |
3013 | } | 3044 | } |
3014 | trace_rcu_utilization("End CPU hotplug"); | 3045 | trace_rcu_utilization(TPS("End CPU hotplug")); |
3046 | return NOTIFY_OK; | ||
3047 | } | ||
3048 | |||
3049 | static int rcu_pm_notify(struct notifier_block *self, | ||
3050 | unsigned long action, void *hcpu) | ||
3051 | { | ||
3052 | switch (action) { | ||
3053 | case PM_HIBERNATION_PREPARE: | ||
3054 | case PM_SUSPEND_PREPARE: | ||
3055 | if (nr_cpu_ids <= 256) /* Expediting bad for large systems. */ | ||
3056 | rcu_expedited = 1; | ||
3057 | break; | ||
3058 | case PM_POST_HIBERNATION: | ||
3059 | case PM_POST_SUSPEND: | ||
3060 | rcu_expedited = 0; | ||
3061 | break; | ||
3062 | default: | ||
3063 | break; | ||
3064 | } | ||
3015 | return NOTIFY_OK; | 3065 | return NOTIFY_OK; |
3016 | } | 3066 | } |
3017 | 3067 | ||
@@ -3256,6 +3306,7 @@ void __init rcu_init(void) | |||
3256 | * or the scheduler are operational. | 3306 | * or the scheduler are operational. |
3257 | */ | 3307 | */ |
3258 | cpu_notifier(rcu_cpu_notify, 0); | 3308 | cpu_notifier(rcu_cpu_notify, 0); |
3309 | pm_notifier(rcu_pm_notify, 0); | ||
3259 | for_each_online_cpu(cpu) | 3310 | for_each_online_cpu(cpu) |
3260 | rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu); | 3311 | rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu); |
3261 | } | 3312 | } |
diff --git a/kernel/rcutree.h b/kernel/rcutree.h index b3832581043c..5f97eab602cd 100644 --- a/kernel/rcutree.h +++ b/kernel/rcutree.h | |||
@@ -88,6 +88,14 @@ struct rcu_dynticks { | |||
88 | /* Process level is worth LLONG_MAX/2. */ | 88 | /* Process level is worth LLONG_MAX/2. */ |
89 | int dynticks_nmi_nesting; /* Track NMI nesting level. */ | 89 | int dynticks_nmi_nesting; /* Track NMI nesting level. */ |
90 | atomic_t dynticks; /* Even value for idle, else odd. */ | 90 | atomic_t dynticks; /* Even value for idle, else odd. */ |
91 | #ifdef CONFIG_NO_HZ_FULL_SYSIDLE | ||
92 | long long dynticks_idle_nesting; | ||
93 | /* irq/process nesting level from idle. */ | ||
94 | atomic_t dynticks_idle; /* Even value for idle, else odd. */ | ||
95 | /* "Idle" excludes userspace execution. */ | ||
96 | unsigned long dynticks_idle_jiffies; | ||
97 | /* End of last non-NMI non-idle period. */ | ||
98 | #endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */ | ||
91 | #ifdef CONFIG_RCU_FAST_NO_HZ | 99 | #ifdef CONFIG_RCU_FAST_NO_HZ |
92 | bool all_lazy; /* Are all CPU's CBs lazy? */ | 100 | bool all_lazy; /* Are all CPU's CBs lazy? */ |
93 | unsigned long nonlazy_posted; | 101 | unsigned long nonlazy_posted; |
@@ -445,7 +453,7 @@ struct rcu_state { | |||
445 | /* for CPU stalls. */ | 453 | /* for CPU stalls. */ |
446 | unsigned long gp_max; /* Maximum GP duration in */ | 454 | unsigned long gp_max; /* Maximum GP duration in */ |
447 | /* jiffies. */ | 455 | /* jiffies. */ |
448 | char *name; /* Name of structure. */ | 456 | const char *name; /* Name of structure. */ |
449 | char abbr; /* Abbreviated name. */ | 457 | char abbr; /* Abbreviated name. */ |
450 | struct list_head flavors; /* List of RCU flavors. */ | 458 | struct list_head flavors; /* List of RCU flavors. */ |
451 | struct irq_work wakeup_work; /* Postponed wakeups */ | 459 | struct irq_work wakeup_work; /* Postponed wakeups */ |
@@ -545,6 +553,15 @@ static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp); | |||
545 | static void rcu_spawn_nocb_kthreads(struct rcu_state *rsp); | 553 | static void rcu_spawn_nocb_kthreads(struct rcu_state *rsp); |
546 | static void rcu_kick_nohz_cpu(int cpu); | 554 | static void rcu_kick_nohz_cpu(int cpu); |
547 | static bool init_nocb_callback_list(struct rcu_data *rdp); | 555 | static bool init_nocb_callback_list(struct rcu_data *rdp); |
556 | static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq); | ||
557 | static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq); | ||
558 | static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle, | ||
559 | unsigned long *maxj); | ||
560 | static bool is_sysidle_rcu_state(struct rcu_state *rsp); | ||
561 | static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle, | ||
562 | unsigned long maxj); | ||
563 | static void rcu_bind_gp_kthread(void); | ||
564 | static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp); | ||
548 | 565 | ||
549 | #endif /* #ifndef RCU_TREE_NONCORE */ | 566 | #endif /* #ifndef RCU_TREE_NONCORE */ |
550 | 567 | ||
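The new dynticks_idle counter declared above follows the same convention as the existing dynticks counter: it is incremented on every transition, so an even value means the CPU is idle and an odd value means it is not, and a sampler that sees the value change knows the CPU passed through the other state in between. A minimal sketch of that even/odd protocol using C11 atomics; this is a plain userspace illustration, not the kernel's implementation:

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_uint dynticks_idle = 1;	/* odd: starts out non-idle */

	static void idle_enter(void) { atomic_fetch_add(&dynticks_idle, 1); }	/* -> even */
	static void idle_exit(void)  { atomic_fetch_add(&dynticks_idle, 1); }	/* -> odd  */

	static int snapshot_says_idle(unsigned int snap)
	{
		return (snap & 0x1) == 0;
	}

	int main(void)
	{
		printf("idle? %d\n", snapshot_says_idle(atomic_load(&dynticks_idle)));	/* 0 */
		idle_enter();
		printf("idle? %d\n", snapshot_says_idle(atomic_load(&dynticks_idle)));	/* 1 */
		idle_exit();
		printf("idle? %d\n", snapshot_says_idle(atomic_load(&dynticks_idle)));	/* 0 */
		return 0;
	}

The separate dynticks_idle_jiffies field then records when the last non-idle period ended, which is what rcu_sysidle_check_cpu() consults when deciding how long the whole system has been idle.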
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h index 769e12e3151b..130c97b027f2 100644 --- a/kernel/rcutree_plugin.h +++ b/kernel/rcutree_plugin.h | |||
@@ -28,7 +28,7 @@ | |||
28 | #include <linux/gfp.h> | 28 | #include <linux/gfp.h> |
29 | #include <linux/oom.h> | 29 | #include <linux/oom.h> |
30 | #include <linux/smpboot.h> | 30 | #include <linux/smpboot.h> |
31 | #include <linux/tick.h> | 31 | #include "time/tick-internal.h" |
32 | 32 | ||
33 | #define RCU_KTHREAD_PRIO 1 | 33 | #define RCU_KTHREAD_PRIO 1 |
34 | 34 | ||
@@ -110,9 +110,7 @@ static void __init rcu_bootup_announce_oddness(void) | |||
110 | 110 | ||
111 | #ifdef CONFIG_TREE_PREEMPT_RCU | 111 | #ifdef CONFIG_TREE_PREEMPT_RCU |
112 | 112 | ||
113 | struct rcu_state rcu_preempt_state = | 113 | RCU_STATE_INITIALIZER(rcu_preempt, 'p', call_rcu); |
114 | RCU_STATE_INITIALIZER(rcu_preempt, 'p', call_rcu); | ||
115 | DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data); | ||
116 | static struct rcu_state *rcu_state = &rcu_preempt_state; | 114 | static struct rcu_state *rcu_state = &rcu_preempt_state; |
117 | 115 | ||
118 | static int rcu_preempted_readers_exp(struct rcu_node *rnp); | 116 | static int rcu_preempted_readers_exp(struct rcu_node *rnp); |
@@ -169,7 +167,7 @@ static void rcu_preempt_qs(int cpu) | |||
169 | struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu); | 167 | struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu); |
170 | 168 | ||
171 | if (rdp->passed_quiesce == 0) | 169 | if (rdp->passed_quiesce == 0) |
172 | trace_rcu_grace_period("rcu_preempt", rdp->gpnum, "cpuqs"); | 170 | trace_rcu_grace_period(TPS("rcu_preempt"), rdp->gpnum, TPS("cpuqs")); |
173 | rdp->passed_quiesce = 1; | 171 | rdp->passed_quiesce = 1; |
174 | current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS; | 172 | current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS; |
175 | } | 173 | } |
@@ -388,7 +386,7 @@ void rcu_read_unlock_special(struct task_struct *t) | |||
388 | np = rcu_next_node_entry(t, rnp); | 386 | np = rcu_next_node_entry(t, rnp); |
389 | list_del_init(&t->rcu_node_entry); | 387 | list_del_init(&t->rcu_node_entry); |
390 | t->rcu_blocked_node = NULL; | 388 | t->rcu_blocked_node = NULL; |
391 | trace_rcu_unlock_preempted_task("rcu_preempt", | 389 | trace_rcu_unlock_preempted_task(TPS("rcu_preempt"), |
392 | rnp->gpnum, t->pid); | 390 | rnp->gpnum, t->pid); |
393 | if (&t->rcu_node_entry == rnp->gp_tasks) | 391 | if (&t->rcu_node_entry == rnp->gp_tasks) |
394 | rnp->gp_tasks = np; | 392 | rnp->gp_tasks = np; |
@@ -412,7 +410,7 @@ void rcu_read_unlock_special(struct task_struct *t) | |||
412 | */ | 410 | */ |
413 | empty_exp_now = !rcu_preempted_readers_exp(rnp); | 411 | empty_exp_now = !rcu_preempted_readers_exp(rnp); |
414 | if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) { | 412 | if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) { |
415 | trace_rcu_quiescent_state_report("preempt_rcu", | 413 | trace_rcu_quiescent_state_report(TPS("preempt_rcu"), |
416 | rnp->gpnum, | 414 | rnp->gpnum, |
417 | 0, rnp->qsmask, | 415 | 0, rnp->qsmask, |
418 | rnp->level, | 416 | rnp->level, |
@@ -1250,12 +1248,12 @@ static int rcu_boost_kthread(void *arg) | |||
1250 | int spincnt = 0; | 1248 | int spincnt = 0; |
1251 | int more2boost; | 1249 | int more2boost; |
1252 | 1250 | ||
1253 | trace_rcu_utilization("Start boost kthread@init"); | 1251 | trace_rcu_utilization(TPS("Start boost kthread@init")); |
1254 | for (;;) { | 1252 | for (;;) { |
1255 | rnp->boost_kthread_status = RCU_KTHREAD_WAITING; | 1253 | rnp->boost_kthread_status = RCU_KTHREAD_WAITING; |
1256 | trace_rcu_utilization("End boost kthread@rcu_wait"); | 1254 | trace_rcu_utilization(TPS("End boost kthread@rcu_wait")); |
1257 | rcu_wait(rnp->boost_tasks || rnp->exp_tasks); | 1255 | rcu_wait(rnp->boost_tasks || rnp->exp_tasks); |
1258 | trace_rcu_utilization("Start boost kthread@rcu_wait"); | 1256 | trace_rcu_utilization(TPS("Start boost kthread@rcu_wait")); |
1259 | rnp->boost_kthread_status = RCU_KTHREAD_RUNNING; | 1257 | rnp->boost_kthread_status = RCU_KTHREAD_RUNNING; |
1260 | more2boost = rcu_boost(rnp); | 1258 | more2boost = rcu_boost(rnp); |
1261 | if (more2boost) | 1259 | if (more2boost) |
@@ -1264,14 +1262,14 @@ static int rcu_boost_kthread(void *arg) | |||
1264 | spincnt = 0; | 1262 | spincnt = 0; |
1265 | if (spincnt > 10) { | 1263 | if (spincnt > 10) { |
1266 | rnp->boost_kthread_status = RCU_KTHREAD_YIELDING; | 1264 | rnp->boost_kthread_status = RCU_KTHREAD_YIELDING; |
1267 | trace_rcu_utilization("End boost kthread@rcu_yield"); | 1265 | trace_rcu_utilization(TPS("End boost kthread@rcu_yield")); |
1268 | schedule_timeout_interruptible(2); | 1266 | schedule_timeout_interruptible(2); |
1269 | trace_rcu_utilization("Start boost kthread@rcu_yield"); | 1267 | trace_rcu_utilization(TPS("Start boost kthread@rcu_yield")); |
1270 | spincnt = 0; | 1268 | spincnt = 0; |
1271 | } | 1269 | } |
1272 | } | 1270 | } |
1273 | /* NOTREACHED */ | 1271 | /* NOTREACHED */ |
1274 | trace_rcu_utilization("End boost kthread@notreached"); | 1272 | trace_rcu_utilization(TPS("End boost kthread@notreached")); |
1275 | return 0; | 1273 | return 0; |
1276 | } | 1274 | } |
1277 | 1275 | ||
@@ -1419,7 +1417,7 @@ static void rcu_cpu_kthread(unsigned int cpu) | |||
1419 | int spincnt; | 1417 | int spincnt; |
1420 | 1418 | ||
1421 | for (spincnt = 0; spincnt < 10; spincnt++) { | 1419 | for (spincnt = 0; spincnt < 10; spincnt++) { |
1422 | trace_rcu_utilization("Start CPU kthread@rcu_wait"); | 1420 | trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait")); |
1423 | local_bh_disable(); | 1421 | local_bh_disable(); |
1424 | *statusp = RCU_KTHREAD_RUNNING; | 1422 | *statusp = RCU_KTHREAD_RUNNING; |
1425 | this_cpu_inc(rcu_cpu_kthread_loops); | 1423 | this_cpu_inc(rcu_cpu_kthread_loops); |
@@ -1431,15 +1429,15 @@ static void rcu_cpu_kthread(unsigned int cpu) | |||
1431 | rcu_kthread_do_work(); | 1429 | rcu_kthread_do_work(); |
1432 | local_bh_enable(); | 1430 | local_bh_enable(); |
1433 | if (*workp == 0) { | 1431 | if (*workp == 0) { |
1434 | trace_rcu_utilization("End CPU kthread@rcu_wait"); | 1432 | trace_rcu_utilization(TPS("End CPU kthread@rcu_wait")); |
1435 | *statusp = RCU_KTHREAD_WAITING; | 1433 | *statusp = RCU_KTHREAD_WAITING; |
1436 | return; | 1434 | return; |
1437 | } | 1435 | } |
1438 | } | 1436 | } |
1439 | *statusp = RCU_KTHREAD_YIELDING; | 1437 | *statusp = RCU_KTHREAD_YIELDING; |
1440 | trace_rcu_utilization("Start CPU kthread@rcu_yield"); | 1438 | trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield")); |
1441 | schedule_timeout_interruptible(2); | 1439 | schedule_timeout_interruptible(2); |
1442 | trace_rcu_utilization("End CPU kthread@rcu_yield"); | 1440 | trace_rcu_utilization(TPS("End CPU kthread@rcu_yield")); |
1443 | *statusp = RCU_KTHREAD_WAITING; | 1441 | *statusp = RCU_KTHREAD_WAITING; |
1444 | } | 1442 | } |
1445 | 1443 | ||
@@ -2202,7 +2200,7 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp) | |||
2202 | * Wait for the grace period. Do so interruptibly to avoid messing | 2200 | * Wait for the grace period. Do so interruptibly to avoid messing |
2203 | * up the load average. | 2201 | * up the load average. |
2204 | */ | 2202 | */ |
2205 | trace_rcu_future_gp(rnp, rdp, c, "StartWait"); | 2203 | trace_rcu_future_gp(rnp, rdp, c, TPS("StartWait")); |
2206 | for (;;) { | 2204 | for (;;) { |
2207 | wait_event_interruptible( | 2205 | wait_event_interruptible( |
2208 | rnp->nocb_gp_wq[c & 0x1], | 2206 | rnp->nocb_gp_wq[c & 0x1], |
@@ -2210,9 +2208,9 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp) | |||
2210 | if (likely(d)) | 2208 | if (likely(d)) |
2211 | break; | 2209 | break; |
2212 | flush_signals(current); | 2210 | flush_signals(current); |
2213 | trace_rcu_future_gp(rnp, rdp, c, "ResumeWait"); | 2211 | trace_rcu_future_gp(rnp, rdp, c, TPS("ResumeWait")); |
2214 | } | 2212 | } |
2215 | trace_rcu_future_gp(rnp, rdp, c, "EndWait"); | 2213 | trace_rcu_future_gp(rnp, rdp, c, TPS("EndWait")); |
2216 | smp_mb(); /* Ensure that CB invocation happens after GP end. */ | 2214 | smp_mb(); /* Ensure that CB invocation happens after GP end. */ |
2217 | } | 2215 | } |
2218 | 2216 | ||
@@ -2375,3 +2373,425 @@ static void rcu_kick_nohz_cpu(int cpu) | |||
2375 | smp_send_reschedule(cpu); | 2373 | smp_send_reschedule(cpu); |
2376 | #endif /* #ifdef CONFIG_NO_HZ_FULL */ | 2374 | #endif /* #ifdef CONFIG_NO_HZ_FULL */ |
2377 | } | 2375 | } |
2376 | |||
2377 | |||
2378 | #ifdef CONFIG_NO_HZ_FULL_SYSIDLE | ||
2379 | |||
2380 | /* | ||
2381 | * Define RCU flavor that holds sysidle state. This needs to be the | ||
2382 | * most active flavor of RCU. | ||
2383 | */ | ||
2384 | #ifdef CONFIG_PREEMPT_RCU | ||
2385 | static struct rcu_state *rcu_sysidle_state = &rcu_preempt_state; | ||
2386 | #else /* #ifdef CONFIG_PREEMPT_RCU */ | ||
2387 | static struct rcu_state *rcu_sysidle_state = &rcu_sched_state; | ||
2388 | #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ | ||
2389 | |||
2390 | static int full_sysidle_state; /* Current system-idle state. */ | ||
2391 | #define RCU_SYSIDLE_NOT 0 /* Some CPU is not idle. */ | ||
2392 | #define RCU_SYSIDLE_SHORT 1 /* All CPUs idle for brief period. */ | ||
2393 | #define RCU_SYSIDLE_LONG 2 /* All CPUs idle for long enough. */ | ||
2394 | #define RCU_SYSIDLE_FULL 3 /* All CPUs idle, ready for sysidle. */ | ||
2395 | #define RCU_SYSIDLE_FULL_NOTED 4 /* Actually entered sysidle state. */ | ||
2396 | |||
2397 | /* | ||
2398 | * Invoked to note exit from irq or task transition to idle. Note that | ||
2399 | * usermode execution does -not- count as idle here! After all, we want | ||
2400 | * to detect full-system idle states, not RCU quiescent states and grace | ||
2401 | * periods. The caller must have disabled interrupts. | ||
2402 | */ | ||
2403 | static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq) | ||
2404 | { | ||
2405 | unsigned long j; | ||
2406 | |||
2407 | /* Adjust nesting, check for fully idle. */ | ||
2408 | if (irq) { | ||
2409 | rdtp->dynticks_idle_nesting--; | ||
2410 | WARN_ON_ONCE(rdtp->dynticks_idle_nesting < 0); | ||
2411 | if (rdtp->dynticks_idle_nesting != 0) | ||
2412 | return; /* Still not fully idle. */ | ||
2413 | } else { | ||
2414 | if ((rdtp->dynticks_idle_nesting & DYNTICK_TASK_NEST_MASK) == | ||
2415 | DYNTICK_TASK_NEST_VALUE) { | ||
2416 | rdtp->dynticks_idle_nesting = 0; | ||
2417 | } else { | ||
2418 | rdtp->dynticks_idle_nesting -= DYNTICK_TASK_NEST_VALUE; | ||
2419 | WARN_ON_ONCE(rdtp->dynticks_idle_nesting < 0); | ||
2420 | return; /* Still not fully idle. */ | ||
2421 | } | ||
2422 | } | ||
2423 | |||
2424 | /* Record start of fully idle period. */ | ||
2425 | j = jiffies; | ||
2426 | ACCESS_ONCE(rdtp->dynticks_idle_jiffies) = j; | ||
2427 | smp_mb__before_atomic_inc(); | ||
2428 | atomic_inc(&rdtp->dynticks_idle); | ||
2429 | smp_mb__after_atomic_inc(); | ||
2430 | WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1); | ||
2431 | } | ||
2432 | |||
2433 | /* | ||
2434 | * Unconditionally force exit from full system-idle state. This is | ||
2435 | * invoked when a normal CPU exits idle, but must be called separately | ||
2436 | * for the timekeeping CPU (tick_do_timer_cpu). The reason for this | ||
2437 | * is that the timekeeping CPU is permitted to take scheduling-clock | ||
2438 | * interrupts while the system is in system-idle state, and of course | ||
2439 | * rcu_sysidle_exit() has no way of distinguishing a scheduling-clock | ||
2440 | * interrupt from any other type of interrupt. | ||
2441 | */ | ||
2442 | void rcu_sysidle_force_exit(void) | ||
2443 | { | ||
2444 | int oldstate = ACCESS_ONCE(full_sysidle_state); | ||
2445 | int newoldstate; | ||
2446 | |||
2447 | /* | ||
2448 | * Each pass through the following loop attempts to exit full | ||
2449 | * system-idle state. If contention proves to be a problem, | ||
2450 | * a trylock-based contention tree could be used here. | ||
2451 | */ | ||
2452 | while (oldstate > RCU_SYSIDLE_SHORT) { | ||
2453 | newoldstate = cmpxchg(&full_sysidle_state, | ||
2454 | oldstate, RCU_SYSIDLE_NOT); | ||
2455 | if (oldstate == newoldstate && | ||
2456 | oldstate == RCU_SYSIDLE_FULL_NOTED) { | ||
2457 | rcu_kick_nohz_cpu(tick_do_timer_cpu); | ||
2458 | return; /* We cleared it, done! */ | ||
2459 | } | ||
2460 | oldstate = newoldstate; | ||
2461 | } | ||
2462 | smp_mb(); /* Order initial oldstate fetch vs. later non-idle work. */ | ||
2463 | } | ||
2464 | |||
2465 | /* | ||
2466 | * Invoked to note entry to irq or task transition from idle. Note that | ||
2467 | * usermode execution does -not- count as idle here! The caller must | ||
2468 | * have disabled interrupts. | ||
2469 | */ | ||
2470 | static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq) | ||
2471 | { | ||
2472 | /* Adjust nesting, check for already non-idle. */ | ||
2473 | if (irq) { | ||
2474 | rdtp->dynticks_idle_nesting++; | ||
2475 | WARN_ON_ONCE(rdtp->dynticks_idle_nesting <= 0); | ||
2476 | if (rdtp->dynticks_idle_nesting != 1) | ||
2477 | return; /* Already non-idle. */ | ||
2478 | } else { | ||
2479 | /* | ||
2480 | * Allow for irq misnesting. Yes, it really is possible | ||
2481 | * to enter an irq handler then never leave it, and maybe | ||
2482 | * also vice versa. Handle both possibilities. | ||
2483 | */ | ||
2484 | if (rdtp->dynticks_idle_nesting & DYNTICK_TASK_NEST_MASK) { | ||
2485 | rdtp->dynticks_idle_nesting += DYNTICK_TASK_NEST_VALUE; | ||
2486 | WARN_ON_ONCE(rdtp->dynticks_idle_nesting <= 0); | ||
2487 | return; /* Already non-idle. */ | ||
2488 | } else { | ||
2489 | rdtp->dynticks_idle_nesting = DYNTICK_TASK_EXIT_IDLE; | ||
2490 | } | ||
2491 | } | ||
2492 | |||
2493 | /* Record end of idle period. */ | ||
2494 | smp_mb__before_atomic_inc(); | ||
2495 | atomic_inc(&rdtp->dynticks_idle); | ||
2496 | smp_mb__after_atomic_inc(); | ||
2497 | WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1)); | ||
2498 | |||
2499 | /* | ||
2500 | * If we are the timekeeping CPU, we are permitted to be non-idle | ||
2501 | * during a system-idle state. This must be the case, because | ||
2502 | * the timekeeping CPU has to take scheduling-clock interrupts | ||
2503 | * during the time that the system is transitioning to full | ||
2504 | * system-idle state. This means that the timekeeping CPU must | ||
2505 | * invoke rcu_sysidle_force_exit() directly if it does anything | ||
2506 | * more than take a scheduling-clock interrupt. | ||
2507 | */ | ||
2508 | if (smp_processor_id() == tick_do_timer_cpu) | ||
2509 | return; | ||
2510 | |||
2511 | /* Update system-idle state: We are clearly no longer fully idle! */ | ||
2512 | rcu_sysidle_force_exit(); | ||
2513 | } | ||
2514 | |||
2515 | /* | ||
2516 | * Check to see if the current CPU is idle. Note that usermode execution | ||
2517 | * does not count as idle. The caller must have disabled interrupts. | ||
2518 | */ | ||
2519 | static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle, | ||
2520 | unsigned long *maxj) | ||
2521 | { | ||
2522 | int cur; | ||
2523 | unsigned long j; | ||
2524 | struct rcu_dynticks *rdtp = rdp->dynticks; | ||
2525 | |||
2526 | /* | ||
2527 | * If some other CPU has already reported non-idle, if this is | ||
2528 | * not the flavor of RCU that tracks sysidle state, or if this | ||
2529 | * is an offline or the timekeeping CPU, nothing to do. | ||
2530 | */ | ||
2531 | if (!*isidle || rdp->rsp != rcu_sysidle_state || | ||
2532 | cpu_is_offline(rdp->cpu) || rdp->cpu == tick_do_timer_cpu) | ||
2533 | return; | ||
2534 | if (rcu_gp_in_progress(rdp->rsp)) | ||
2535 | WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu); | ||
2536 | |||
2537 | /* Pick up current idle and NMI-nesting counter and check. */ | ||
2538 | cur = atomic_read(&rdtp->dynticks_idle); | ||
2539 | if (cur & 0x1) { | ||
2540 | *isidle = false; /* We are not idle! */ | ||
2541 | return; | ||
2542 | } | ||
2543 | smp_mb(); /* Read counters before timestamps. */ | ||
2544 | |||
2545 | /* Pick up timestamps. */ | ||
2546 | j = ACCESS_ONCE(rdtp->dynticks_idle_jiffies); | ||
2547 | /* If this CPU entered idle more recently, update maxj timestamp. */ | ||
2548 | if (ULONG_CMP_LT(*maxj, j)) | ||
2549 | *maxj = j; | ||
2550 | } | ||
2551 | |||
2552 | /* | ||
2553 | * Is this the flavor of RCU that is handling full-system idle? | ||
2554 | */ | ||
2555 | static bool is_sysidle_rcu_state(struct rcu_state *rsp) | ||
2556 | { | ||
2557 | return rsp == rcu_sysidle_state; | ||
2558 | } | ||
2559 | |||
2560 | /* | ||
2561 | * Bind the grace-period kthread for the sysidle flavor of RCU to the | ||
2562 | * timekeeping CPU. | ||
2563 | */ | ||
2564 | static void rcu_bind_gp_kthread(void) | ||
2565 | { | ||
2566 | int cpu = ACCESS_ONCE(tick_do_timer_cpu); | ||
2567 | |||
2568 | if (cpu < 0 || cpu >= nr_cpu_ids) | ||
2569 | return; | ||
2570 | if (raw_smp_processor_id() != cpu) | ||
2571 | set_cpus_allowed_ptr(current, cpumask_of(cpu)); | ||
2572 | } | ||
2573 | |||
2574 | /* | ||
2575 | * Return a delay in jiffies based on the number of CPUs, rcu_node | ||
2576 | * leaf fanout, and jiffies tick rate. The idea is to allow larger | ||
2577 | * systems more time to transition to full-idle state in order to | ||
2578 | * avoid the cache thrashing that would otherwise occur on the state variable. | ||
2579 | * Really small systems (less than a couple of tens of CPUs) should | ||
2580 | * instead use a single global atomically incremented counter, and later | ||
2581 | * versions of this will automatically reconfigure themselves accordingly. | ||
2582 | */ | ||
2583 | static unsigned long rcu_sysidle_delay(void) | ||
2584 | { | ||
2585 | if (nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL) | ||
2586 | return 0; | ||
2587 | return DIV_ROUND_UP(nr_cpu_ids * HZ, rcu_fanout_leaf * 1000); | ||
2588 | } | ||
2589 | |||
2590 | /* | ||
2591 | * Advance the full-system-idle state. This is invoked when all of | ||
2592 | * the non-timekeeping CPUs are idle. | ||
2593 | */ | ||
2594 | static void rcu_sysidle(unsigned long j) | ||
2595 | { | ||
2596 | /* Check the current state. */ | ||
2597 | switch (ACCESS_ONCE(full_sysidle_state)) { | ||
2598 | case RCU_SYSIDLE_NOT: | ||
2599 | |||
2600 | /* First time all are idle, so note a short idle period. */ | ||
2601 | ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_SHORT; | ||
2602 | break; | ||
2603 | |||
2604 | case RCU_SYSIDLE_SHORT: | ||
2605 | |||
2606 | /* | ||
2607 | * Idle for a bit, time to advance to next state? | ||
2608 | * cmpxchg failure means race with non-idle, let them win. | ||
2609 | */ | ||
2610 | if (ULONG_CMP_GE(jiffies, j + rcu_sysidle_delay())) | ||
2611 | (void)cmpxchg(&full_sysidle_state, | ||
2612 | RCU_SYSIDLE_SHORT, RCU_SYSIDLE_LONG); | ||
2613 | break; | ||
2614 | |||
2615 | case RCU_SYSIDLE_LONG: | ||
2616 | |||
2617 | /* | ||
2618 | * Do an additional check pass before advancing to full. | ||
2619 | * cmpxchg failure means race with non-idle, let them win. | ||
2620 | */ | ||
2621 | if (ULONG_CMP_GE(jiffies, j + rcu_sysidle_delay())) | ||
2622 | (void)cmpxchg(&full_sysidle_state, | ||
2623 | RCU_SYSIDLE_LONG, RCU_SYSIDLE_FULL); | ||
2624 | break; | ||
2625 | |||
2626 | default: | ||
2627 | break; | ||
2628 | } | ||
2629 | } | ||
2630 | |||
2631 | /* | ||
2632 | * Found a non-idle non-timekeeping CPU, so kick the system-idle state | ||
2633 | * back to the beginning. | ||
2634 | */ | ||
2635 | static void rcu_sysidle_cancel(void) | ||
2636 | { | ||
2637 | smp_mb(); | ||
2638 | ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_NOT; | ||
2639 | } | ||
2640 | |||
2641 | /* | ||
2642 | * Update the sysidle state based on the results of a force-quiescent-state | ||
2643 | * scan of the CPUs' dyntick-idle state. | ||
2644 | */ | ||
2645 | static void rcu_sysidle_report(struct rcu_state *rsp, int isidle, | ||
2646 | unsigned long maxj, bool gpkt) | ||
2647 | { | ||
2648 | if (rsp != rcu_sysidle_state) | ||
2649 | return; /* Wrong flavor, ignore. */ | ||
2650 | if (gpkt && nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL) | ||
2651 | return; /* Running state machine from timekeeping CPU. */ | ||
2652 | if (isidle) | ||
2653 | rcu_sysidle(maxj); /* More idle! */ | ||
2654 | else | ||
2655 | rcu_sysidle_cancel(); /* Idle is over. */ | ||
2656 | } | ||
2657 | |||
2658 | /* | ||
2659 | * Wrapper for rcu_sysidle_report() when called from the grace-period | ||
2660 | * kthread's context. | ||
2661 | */ | ||
2662 | static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle, | ||
2663 | unsigned long maxj) | ||
2664 | { | ||
2665 | rcu_sysidle_report(rsp, isidle, maxj, true); | ||
2666 | } | ||
2667 | |||
2668 | /* Callback and function for forcing an RCU grace period. */ | ||
2669 | struct rcu_sysidle_head { | ||
2670 | struct rcu_head rh; | ||
2671 | int inuse; | ||
2672 | }; | ||
2673 | |||
2674 | static void rcu_sysidle_cb(struct rcu_head *rhp) | ||
2675 | { | ||
2676 | struct rcu_sysidle_head *rshp; | ||
2677 | |||
2678 | /* | ||
2679 | * The following memory barrier is needed to replace the | ||
2680 | * memory barriers that would normally be in the memory | ||
2681 | * allocator. | ||
2682 | */ | ||
2683 | smp_mb(); /* grace period precedes setting inuse. */ | ||
2684 | |||
2685 | rshp = container_of(rhp, struct rcu_sysidle_head, rh); | ||
2686 | ACCESS_ONCE(rshp->inuse) = 0; | ||
2687 | } | ||
2688 | |||
2689 | /* | ||
2690 | * Check to see if the system is fully idle, other than the timekeeping CPU. | ||
2691 | * The caller must have disabled interrupts. | ||
2692 | */ | ||
2693 | bool rcu_sys_is_idle(void) | ||
2694 | { | ||
2695 | static struct rcu_sysidle_head rsh; | ||
2696 | int rss = ACCESS_ONCE(full_sysidle_state); | ||
2697 | |||
2698 | if (WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu)) | ||
2699 | return false; | ||
2700 | |||
2701 | /* Handle small-system case by doing a full scan of CPUs. */ | ||
2702 | if (nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL) { | ||
2703 | int oldrss = rss - 1; | ||
2704 | |||
2705 | /* | ||
2706 | * One pass to advance to each state up to _FULL. | ||
2707 | * Give up if any pass fails to advance the state. | ||
2708 | */ | ||
2709 | while (rss < RCU_SYSIDLE_FULL && oldrss < rss) { | ||
2710 | int cpu; | ||
2711 | bool isidle = true; | ||
2712 | unsigned long maxj = jiffies - ULONG_MAX / 4; | ||
2713 | struct rcu_data *rdp; | ||
2714 | |||
2715 | /* Scan all the CPUs looking for nonidle CPUs. */ | ||
2716 | for_each_possible_cpu(cpu) { | ||
2717 | rdp = per_cpu_ptr(rcu_sysidle_state->rda, cpu); | ||
2718 | rcu_sysidle_check_cpu(rdp, &isidle, &maxj); | ||
2719 | if (!isidle) | ||
2720 | break; | ||
2721 | } | ||
2722 | rcu_sysidle_report(rcu_sysidle_state, | ||
2723 | isidle, maxj, false); | ||
2724 | oldrss = rss; | ||
2725 | rss = ACCESS_ONCE(full_sysidle_state); | ||
2726 | } | ||
2727 | } | ||
2728 | |||
2729 | /* If this is the first observation of an idle period, record it. */ | ||
2730 | if (rss == RCU_SYSIDLE_FULL) { | ||
2731 | rss = cmpxchg(&full_sysidle_state, | ||
2732 | RCU_SYSIDLE_FULL, RCU_SYSIDLE_FULL_NOTED); | ||
2733 | return rss == RCU_SYSIDLE_FULL; | ||
2734 | } | ||
2735 | |||
2736 | smp_mb(); /* ensure rss load happens before later caller actions. */ | ||
2737 | |||
2738 | /* If already fully idle, tell the caller (in case of races). */ | ||
2739 | if (rss == RCU_SYSIDLE_FULL_NOTED) | ||
2740 | return true; | ||
2741 | |||
2742 | /* | ||
2743 | * If we aren't there yet, and a grace period is not in flight, | ||
2744 | * initiate a grace period. Either way, tell the caller that | ||
2745 | * we are not there yet. We use an xchg() rather than an assignment | ||
2746 | * to make up for the memory barriers that would otherwise be | ||
2747 | * provided by the memory allocator. | ||
2748 | */ | ||
2749 | if (nr_cpu_ids > CONFIG_NO_HZ_FULL_SYSIDLE_SMALL && | ||
2750 | !rcu_gp_in_progress(rcu_sysidle_state) && | ||
2751 | !rsh.inuse && xchg(&rsh.inuse, 1) == 0) | ||
2752 | call_rcu(&rsh.rh, rcu_sysidle_cb); | ||
2753 | return false; | ||
2754 | } | ||
2755 | |||
2756 | /* | ||
2757 | * Initialize dynticks sysidle state for CPUs coming online. | ||
2758 | */ | ||
2759 | static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp) | ||
2760 | { | ||
2761 | rdtp->dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE; | ||
2762 | } | ||
2763 | |||
2764 | #else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */ | ||
2765 | |||
2766 | static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq) | ||
2767 | { | ||
2768 | } | ||
2769 | |||
2770 | static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq) | ||
2771 | { | ||
2772 | } | ||
2773 | |||
2774 | static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle, | ||
2775 | unsigned long *maxj) | ||
2776 | { | ||
2777 | } | ||
2778 | |||
2779 | static bool is_sysidle_rcu_state(struct rcu_state *rsp) | ||
2780 | { | ||
2781 | return false; | ||
2782 | } | ||
2783 | |||
2784 | static void rcu_bind_gp_kthread(void) | ||
2785 | { | ||
2786 | } | ||
2787 | |||
2788 | static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle, | ||
2789 | unsigned long maxj) | ||
2790 | { | ||
2791 | } | ||
2792 | |||
2793 | static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp) | ||
2794 | { | ||
2795 | } | ||
2796 | |||
2797 | #endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */ | ||
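
For a rough sense of the timing above, with hypothetical values nr_cpu_ids = 64, rcu_fanout_leaf = 16, and HZ = 1000, rcu_sysidle_delay() returns DIV_ROUND_UP(64 * 1000, 16 * 1000) = 4 jiffies, so the state machine dwells about four jiffies in each of the SHORT and LONG states before declaring full-system idle; systems at or below CONFIG_NO_HZ_FULL_SYSIDLE_SMALL skip the delay entirely. A minimal sketch of a timekeeping-CPU caller follows; the function name is hypothetical and not part of the patch:

	/* Hypothetical caller on the timekeeping CPU (tick_do_timer_cpu). */
	static bool timekeeping_cpu_may_stop_tick(void)
	{
		unsigned long flags;
		bool idle;

		local_irq_save(flags);		/* rcu_sys_is_idle() requires irqs disabled. */
		idle = rcu_sys_is_idle();	/* True only once full-system idle has been noted. */
		local_irq_restore(flags);

		return idle;
	}
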
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig index 70f27e89012b..3381f098070f 100644 --- a/kernel/time/Kconfig +++ b/kernel/time/Kconfig | |||
@@ -134,6 +134,56 @@ config NO_HZ_FULL_ALL | |||
134 | Note the boot CPU will still be kept outside the range to | 134 | Note the boot CPU will still be kept outside the range to |
135 | handle the timekeeping duty. | 135 | handle the timekeeping duty. |
136 | 136 | ||
137 | config NO_HZ_FULL_SYSIDLE | ||
138 | bool "Detect full-system idle state for full dynticks system" | ||
139 | depends on NO_HZ_FULL | ||
140 | default n | ||
141 | help | ||
142 | At least one CPU must keep the scheduling-clock tick running for | ||
143 | timekeeping purposes whenever there is a non-idle CPU, where | ||
144 | "non-idle" also includes dynticks CPUs as long as they are | ||
145 | running non-idle tasks. Because the underlying adaptive-tick | ||
146 | support cannot distinguish between all CPUs being idle and | ||
147 | all CPUs each running a single task in dynticks mode, the | ||
148 | underlying support simply ensures that there is always a CPU | ||
149 | handling the scheduling-clock tick, whether or not all CPUs | ||
150 | are idle. This Kconfig option enables scalable detection of | ||
151 | the all-CPUs-idle state, thus allowing the scheduling-clock | ||
152 | tick to be disabled when all CPUs are idle. Note that scalable | ||
153 | detection of the all-CPUs-idle state means that larger systems | ||
154 | will be slower to declare the all-CPUs-idle state. | ||
155 | |||
156 | Say Y if you would like to help debug all-CPUs-idle detection. | ||
157 | |||
158 | Say N if you are unsure. | ||
159 | |||
160 | config NO_HZ_FULL_SYSIDLE_SMALL | ||
161 | int "Number of CPUs above which large-system approach is used" | ||
162 | depends on NO_HZ_FULL_SYSIDLE | ||
163 | range 1 NR_CPUS | ||
164 | default 8 | ||
165 | help | ||
166 | The full-system idle detection mechanism takes a lazy approach | ||
167 | on large systems, as is required to attain decent scalability. | ||
168 | However, on smaller systems, scalability is not anywhere near as | ||
169 | large a concern as is energy efficiency. The sysidle subsystem | ||
170 | therefore uses a fast but non-scalable algorithm for small | ||
171 | systems and a lazier but scalable algorithm for large systems. | ||
172 | This Kconfig parameter defines the number of CPUs in the largest | ||
173 | system that will be considered to be "small". | ||
174 | |||
175 | The default value will be fine in most cases. Battery-powered | ||
176 | systems that (1) enable NO_HZ_FULL_SYSIDLE, (2) have larger | ||
177 | numbers of CPUs, and (3) are suffering from battery-lifetime | ||
178 | problems due to long sysidle latencies might wish to experiment | ||
179 | with larger values for this Kconfig parameter. On the other | ||
180 | hand, they might be even better served by disabling NO_HZ_FULL | ||
181 | entirely, given that NO_HZ_FULL is intended for HPC and | ||
182 | real-time workloads that at present do not tend to be run on | ||
183 | battery-powered systems. | ||
184 | |||
185 | Take the default if you are unsure. | ||
186 | |||
137 | config NO_HZ | 187 | config NO_HZ |
138 | bool "Old Idle dynticks config" | 188 | bool "Old Idle dynticks config" |
139 | depends on !ARCH_USES_GETTIMEOFFSET && GENERIC_CLOCKEVENTS | 189 | depends on !ARCH_USES_GETTIMEOFFSET && GENERIC_CLOCKEVENTS |
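
The NO_HZ_FULL_SYSIDLE_SMALL threshold above is the same value consulted by the RCU code earlier in this patch: at or below it, the timekeeping CPU scans all CPUs directly in rcu_sys_is_idle(), while above it the grace-period kthread drives the lazy state machine through rcu_sysidle_report_gp(). A minimal sketch of that split, mirroring the comparison the patch performs (the helper name is hypothetical):

	/* Hypothetical helper; mirrors the small-system check in rcu_sys_is_idle(). */
	static bool rcu_sysidle_uses_fast_path(void)
	{
		/* Small systems: full CPU scan from the timekeeping CPU, no delay. */
		return nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL;
	}
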
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index afaae41b0a02..fe39acd4c1aa 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
@@ -1022,6 +1022,9 @@ extern struct list_head ftrace_events; | |||
1022 | extern const char *__start___trace_bprintk_fmt[]; | 1022 | extern const char *__start___trace_bprintk_fmt[]; |
1023 | extern const char *__stop___trace_bprintk_fmt[]; | 1023 | extern const char *__stop___trace_bprintk_fmt[]; |
1024 | 1024 | ||
1025 | extern const char *__start___tracepoint_str[]; | ||
1026 | extern const char *__stop___tracepoint_str[]; | ||
1027 | |||
1025 | void trace_printk_init_buffers(void); | 1028 | void trace_printk_init_buffers(void); |
1026 | void trace_printk_start_comm(void); | 1029 | void trace_printk_start_comm(void); |
1027 | int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set); | 1030 | int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set); |
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c index a9077c1b4ad3..2900817ba65c 100644 --- a/kernel/trace/trace_printk.c +++ b/kernel/trace/trace_printk.c | |||
@@ -244,12 +244,31 @@ static const char **find_next(void *v, loff_t *pos) | |||
244 | { | 244 | { |
245 | const char **fmt = v; | 245 | const char **fmt = v; |
246 | int start_index; | 246 | int start_index; |
247 | int last_index; | ||
247 | 248 | ||
248 | start_index = __stop___trace_bprintk_fmt - __start___trace_bprintk_fmt; | 249 | start_index = __stop___trace_bprintk_fmt - __start___trace_bprintk_fmt; |
249 | 250 | ||
250 | if (*pos < start_index) | 251 | if (*pos < start_index) |
251 | return __start___trace_bprintk_fmt + *pos; | 252 | return __start___trace_bprintk_fmt + *pos; |
252 | 253 | ||
254 | /* | ||
255 | * The __tracepoint_str section is treated the same as the | ||
256 | * __trace_printk_fmt section. The difference is that the | ||
257 | * __trace_printk_fmt section should only be used by trace_printk() | ||
258 | * in a debugging environment, as if anything exists in that section | ||
259 | * the trace_printk() helper buffers are allocated, which would just | ||
260 | * waste space in a production environment. | ||
261 | * | ||
262 | * The __tracepoint_str sections on the other hand are used by | ||
263 | * tracepoints which need to map pointers to their strings to | ||
264 | * the ASCII text for userspace. | ||
265 | */ | ||
266 | last_index = start_index; | ||
267 | start_index = __stop___tracepoint_str - __start___tracepoint_str; | ||
268 | |||
269 | if (*pos < last_index + start_index) | ||
270 | return __start___tracepoint_str + (*pos - last_index); | ||
271 | |||
253 | return find_next_mod_format(start_index, v, fmt, pos); | 272 | return find_next_mod_format(start_index, v, fmt, pos); |
254 | } | 273 | } |
255 | 274 | ||
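
The TPS() strings used throughout the RCU changes earlier in this patch are the intended occupants of this new section. As a sketch, assuming tracepoint_string() (wrapped as TPS() in kernel/rcu.h elsewhere in this series) places each string literal into the __tracepoint_str section, a tracepoint caller passes only the resulting pointer, and the iterator above lets userspace recover the ASCII text; the function below is hypothetical:

	/* Hypothetical example; the string constants here are illustrative only. */
	static void example_mark_phase(void)
	{
		trace_rcu_utilization(TPS("Start example@init"));
		/* ... some RCU-related processing ... */
		trace_rcu_utilization(TPS("End example@init"));
	}
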
diff --git a/lib/debugobjects.c b/lib/debugobjects.c index 37061ede8b81..bf2c8b1043d8 100644 --- a/lib/debugobjects.c +++ b/lib/debugobjects.c | |||
@@ -381,19 +381,21 @@ void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr) | |||
381 | * debug_object_activate - debug checks when an object is activated | 381 | * debug_object_activate - debug checks when an object is activated |
382 | * @addr: address of the object | 382 | * @addr: address of the object |
383 | * @descr: pointer to an object specific debug description structure | 383 | * @descr: pointer to an object specific debug description structure |
384 | * Returns 0 for success, -EINVAL if a check failed. | ||
384 | */ | 385 | */ |
385 | void debug_object_activate(void *addr, struct debug_obj_descr *descr) | 386 | int debug_object_activate(void *addr, struct debug_obj_descr *descr) |
386 | { | 387 | { |
387 | enum debug_obj_state state; | 388 | enum debug_obj_state state; |
388 | struct debug_bucket *db; | 389 | struct debug_bucket *db; |
389 | struct debug_obj *obj; | 390 | struct debug_obj *obj; |
390 | unsigned long flags; | 391 | unsigned long flags; |
392 | int ret; | ||
391 | struct debug_obj o = { .object = addr, | 393 | struct debug_obj o = { .object = addr, |
392 | .state = ODEBUG_STATE_NOTAVAILABLE, | 394 | .state = ODEBUG_STATE_NOTAVAILABLE, |
393 | .descr = descr }; | 395 | .descr = descr }; |
394 | 396 | ||
395 | if (!debug_objects_enabled) | 397 | if (!debug_objects_enabled) |
396 | return; | 398 | return 0; |
397 | 399 | ||
398 | db = get_bucket((unsigned long) addr); | 400 | db = get_bucket((unsigned long) addr); |
399 | 401 | ||
@@ -405,23 +407,26 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr) | |||
405 | case ODEBUG_STATE_INIT: | 407 | case ODEBUG_STATE_INIT: |
406 | case ODEBUG_STATE_INACTIVE: | 408 | case ODEBUG_STATE_INACTIVE: |
407 | obj->state = ODEBUG_STATE_ACTIVE; | 409 | obj->state = ODEBUG_STATE_ACTIVE; |
410 | ret = 0; | ||
408 | break; | 411 | break; |
409 | 412 | ||
410 | case ODEBUG_STATE_ACTIVE: | 413 | case ODEBUG_STATE_ACTIVE: |
411 | debug_print_object(obj, "activate"); | 414 | debug_print_object(obj, "activate"); |
412 | state = obj->state; | 415 | state = obj->state; |
413 | raw_spin_unlock_irqrestore(&db->lock, flags); | 416 | raw_spin_unlock_irqrestore(&db->lock, flags); |
414 | debug_object_fixup(descr->fixup_activate, addr, state); | 417 | ret = debug_object_fixup(descr->fixup_activate, addr, state); |
415 | return; | 418 | return ret ? -EINVAL : 0; |
416 | 419 | ||
417 | case ODEBUG_STATE_DESTROYED: | 420 | case ODEBUG_STATE_DESTROYED: |
418 | debug_print_object(obj, "activate"); | 421 | debug_print_object(obj, "activate"); |
422 | ret = -EINVAL; | ||
419 | break; | 423 | break; |
420 | default: | 424 | default: |
425 | ret = 0; | ||
421 | break; | 426 | break; |
422 | } | 427 | } |
423 | raw_spin_unlock_irqrestore(&db->lock, flags); | 428 | raw_spin_unlock_irqrestore(&db->lock, flags); |
424 | return; | 429 | return ret; |
425 | } | 430 | } |
426 | 431 | ||
427 | raw_spin_unlock_irqrestore(&db->lock, flags); | 432 | raw_spin_unlock_irqrestore(&db->lock, flags); |
@@ -431,8 +436,11 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr) | |||
431 | * true or not. | 436 | * true or not. |
432 | */ | 437 | */ |
433 | if (debug_object_fixup(descr->fixup_activate, addr, | 438 | if (debug_object_fixup(descr->fixup_activate, addr, |
434 | ODEBUG_STATE_NOTAVAILABLE)) | 439 | ODEBUG_STATE_NOTAVAILABLE)) { |
435 | debug_print_object(&o, "activate"); | 440 | debug_print_object(&o, "activate"); |
441 | return -EINVAL; | ||
442 | } | ||
443 | return 0; | ||
436 | } | 444 | } |
437 | 445 | ||
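
With debug_object_activate() now returning an int, callers can react when activation fails its sanity checks. A minimal sketch of such a caller, under the assumption of a hypothetical object type and descriptor (neither appears in the patch):

	/* Hypothetical object descriptor and activation wrapper. */
	static struct debug_obj_descr my_obj_debug_descr = {
		.name	= "my_obj",
	};

	static int my_obj_activate(void *obj)
	{
		/* -EINVAL indicates the object failed debugobjects' checks. */
		return debug_object_activate(obj, &my_obj_debug_descr);
	}
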
438 | /** | 446 | /** |