summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorIngo Molnar <mingo@kernel.org>2019-02-13 02:36:18 -0500
committerIngo Molnar <mingo@kernel.org>2019-02-13 02:36:18 -0500
commitcae45e1c6c541283a1bd155aa7b0a57e353b4df4 (patch)
treeba87fdd6b47cb2937fc1b6d69ef0595a08646556
parentaa0c38cf39de73bf7360a3da8f1707601261e518 (diff)
parente7ffb4eb9a6d89678e7f62461737899f88dab64e (diff)
Merge branch 'rcu-next' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu into core/rcu
Pull the latest RCU tree from Paul E. McKenney: - Additional cleanups after RCU flavor consolidation - Grace-period forward-progress cleanups and improvements - Documentation updates - Miscellaneous fixes - spin_is_locked() conversions to lockdep - SPDX changes to RCU source and header files - SRCU updates - Torture-test updates, including nolibc updates and moving nolibc to tools/include Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--Documentation/RCU/Design/Expedited-Grace-Periods/ExpSchedFlow.svg18
-rw-r--r--Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html26
-rw-r--r--Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html6
-rw-r--r--Documentation/RCU/Design/Memory-Ordering/TreeRCU-callback-invocation.svg2
-rw-r--r--Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp.svg8
-rw-r--r--Documentation/RCU/Design/Memory-Ordering/TreeRCU-qs.svg6
-rw-r--r--Documentation/RCU/Design/Requirements/Requirements.html20
-rw-r--r--Documentation/RCU/stallwarn.txt15
-rw-r--r--Documentation/RCU/torture.txt169
-rw-r--r--Documentation/RCU/whatisRCU.txt4
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt32
-rw-r--r--MAINTAINERS6
-rw-r--r--include/asm-generic/bug.h3
-rw-r--r--include/linux/rcu_node_tree.h17
-rw-r--r--include/linux/rcu_segcblist.h17
-rw-r--r--include/linux/rcu_sync.h15
-rw-r--r--include/linux/rcupdate.h91
-rw-r--r--include/linux/rcutiny.h17
-rw-r--r--include/linux/rcutree.h19
-rw-r--r--include/linux/srcu.h18
-rw-r--r--include/linux/srcutiny.h17
-rw-r--r--include/linux/srcutree.h20
-rw-r--r--include/linux/torture.h20
-rw-r--r--kernel/locking/locktorture.c21
-rw-r--r--kernel/rcu/rcu.h21
-rw-r--r--kernel/rcu/rcu_segcblist.c17
-rw-r--r--kernel/rcu/rcu_segcblist.h17
-rw-r--r--kernel/rcu/rcuperf.c27
-rw-r--r--kernel/rcu/rcutorture.c59
-rw-r--r--kernel/rcu/srcutiny.c17
-rw-r--r--kernel/rcu/srcutree.c72
-rw-r--r--kernel/rcu/sync.c15
-rw-r--r--kernel/rcu/tiny.c19
-rw-r--r--kernel/rcu/tree.c267
-rw-r--r--kernel/rcu/tree.h53
-rw-r--r--kernel/rcu/tree_exp.h201
-rw-r--r--kernel/rcu/tree_plugin.h238
-rw-r--r--kernel/rcu/update.c17
-rw-r--r--kernel/sched/cpufreq.c4
-rw-r--r--kernel/sched/cpufreq_schedutil.c2
-rw-r--r--kernel/sched/sched.h2
-rw-r--r--kernel/sched/topology.c4
-rw-r--r--kernel/time/timer.c2
-rw-r--r--kernel/torture.c25
-rw-r--r--tools/include/nolibc/nolibc.h (renamed from tools/testing/selftests/rcutorture/bin/nolibc.h)118
-rw-r--r--tools/memory-model/.gitignore1
-rw-r--r--tools/memory-model/README2
-rw-r--r--tools/memory-model/linux-kernel.bell3
-rw-r--r--tools/memory-model/linux-kernel.cat4
-rw-r--r--tools/memory-model/linux-kernel.def1
-rw-r--r--tools/memory-model/scripts/README70
-rwxr-xr-xtools/memory-model/scripts/checkalllitmus.sh53
-rw-r--r--tools/memory-model/scripts/checkghlitmus.sh65
-rwxr-xr-xtools/memory-model/scripts/checklitmus.sh74
-rw-r--r--tools/memory-model/scripts/checklitmushist.sh60
-rw-r--r--tools/memory-model/scripts/cmplitmushist.sh87
-rw-r--r--tools/memory-model/scripts/initlitmushist.sh68
-rw-r--r--tools/memory-model/scripts/judgelitmus.sh78
-rw-r--r--tools/memory-model/scripts/newlitmushist.sh61
-rw-r--r--tools/memory-model/scripts/parseargs.sh136
-rw-r--r--tools/memory-model/scripts/runlitmushist.sh87
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/mkinitrd.sh27
-rw-r--r--virt/kvm/kvm_main.c2
63 files changed, 1395 insertions, 1268 deletions
diff --git a/Documentation/RCU/Design/Expedited-Grace-Periods/ExpSchedFlow.svg b/Documentation/RCU/Design/Expedited-Grace-Periods/ExpSchedFlow.svg
index e4233ac93c2b..6189ffcc6aff 100644
--- a/Documentation/RCU/Design/Expedited-Grace-Periods/ExpSchedFlow.svg
+++ b/Documentation/RCU/Design/Expedited-Grace-Periods/ExpSchedFlow.svg
@@ -328,13 +328,13 @@
328 inkscape:window-height="1148" 328 inkscape:window-height="1148"
329 id="namedview90" 329 id="namedview90"
330 showgrid="true" 330 showgrid="true"
331 inkscape:zoom="0.80021373" 331 inkscape:zoom="0.69092787"
332 inkscape:cx="462.49289" 332 inkscape:cx="476.34085"
333 inkscape:cy="473.6718" 333 inkscape:cy="712.80957"
334 inkscape:window-x="770" 334 inkscape:window-x="770"
335 inkscape:window-y="24" 335 inkscape:window-y="24"
336 inkscape:window-maximized="0" 336 inkscape:window-maximized="0"
337 inkscape:current-layer="g4114-9-3-9" 337 inkscape:current-layer="g4"
338 inkscape:snap-grids="false" 338 inkscape:snap-grids="false"
339 fit-margin-top="5" 339 fit-margin-top="5"
340 fit-margin-right="5" 340 fit-margin-right="5"
@@ -813,14 +813,18 @@
813 <text 813 <text
814 sodipodi:linespacing="125%" 814 sodipodi:linespacing="125%"
815 id="text4110-5-7-6-2-4-0" 815 id="text4110-5-7-6-2-4-0"
816 y="841.88086" 816 y="670.74316"
817 x="1460.1007" 817 x="1460.1007"
818 style="font-size:267.24359131px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans" 818 style="font-size:267.24359131px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
819 xml:space="preserve"><tspan 819 xml:space="preserve"><tspan
820 y="841.88086" 820 y="670.74316"
821 x="1460.1007"
822 sodipodi:role="line"
823 id="tspan4925-1-2-4-5">Request</tspan><tspan
824 y="1004.7976"
821 x="1460.1007" 825 x="1460.1007"
822 sodipodi:role="line" 826 sodipodi:role="line"
823 id="tspan4925-1-2-4-5">reched_cpu()</tspan></text> 827 id="tspan3100">context switch</tspan></text>
824 </g> 828 </g>
825 </g> 829 </g>
826</svg> 830</svg>
diff --git a/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html b/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html
index 8e4f873b979f..19e7a5fb6b73 100644
--- a/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html
+++ b/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html
@@ -72,10 +72,10 @@ will ignore it because idle and offline CPUs are already residing
72in quiescent states. 72in quiescent states.
73Otherwise, the expedited grace period will use 73Otherwise, the expedited grace period will use
74<tt>smp_call_function_single()</tt> to send the CPU an IPI, which 74<tt>smp_call_function_single()</tt> to send the CPU an IPI, which
75is handled by <tt>sync_rcu_exp_handler()</tt>. 75is handled by <tt>rcu_exp_handler()</tt>.
76 76
77<p> 77<p>
78However, because this is preemptible RCU, <tt>sync_rcu_exp_handler()</tt> 78However, because this is preemptible RCU, <tt>rcu_exp_handler()</tt>
79can check to see if the CPU is currently running in an RCU read-side 79can check to see if the CPU is currently running in an RCU read-side
80critical section. 80critical section.
81If not, the handler can immediately report a quiescent state. 81If not, the handler can immediately report a quiescent state.
@@ -145,19 +145,18 @@ expedited grace period is shown in the following diagram:
145<p><img src="ExpSchedFlow.svg" alt="ExpSchedFlow.svg" width="55%"> 145<p><img src="ExpSchedFlow.svg" alt="ExpSchedFlow.svg" width="55%">
146 146
147<p> 147<p>
148As with RCU-preempt's <tt>synchronize_rcu_expedited()</tt>, 148As with RCU-preempt, RCU-sched's
149<tt>synchronize_sched_expedited()</tt> ignores offline and 149<tt>synchronize_sched_expedited()</tt> ignores offline and
150idle CPUs, again because they are in remotely detectable 150idle CPUs, again because they are in remotely detectable
151quiescent states. 151quiescent states.
152However, the <tt>synchronize_rcu_expedited()</tt> handler 152However, because the
153is <tt>sync_sched_exp_handler()</tt>, and because the
154<tt>rcu_read_lock_sched()</tt> and <tt>rcu_read_unlock_sched()</tt> 153<tt>rcu_read_lock_sched()</tt> and <tt>rcu_read_unlock_sched()</tt>
155leave no trace of their invocation, in general it is not possible to tell 154leave no trace of their invocation, in general it is not possible to tell
156whether or not the current CPU is in an RCU read-side critical section. 155whether or not the current CPU is in an RCU read-side critical section.
157The best that <tt>sync_sched_exp_handler()</tt> can do is to check 156The best that RCU-sched's <tt>rcu_exp_handler()</tt> can do is to check
158for idle, on the off-chance that the CPU went idle while the IPI 157for idle, on the off-chance that the CPU went idle while the IPI
159was in flight. 158was in flight.
160If the CPU is idle, then <tt>sync_sched_exp_handler()</tt> reports 159If the CPU is idle, then <tt>rcu_exp_handler()</tt> reports
161the quiescent state. 160the quiescent state.
162 161
163<p> Otherwise, the handler forces a future context switch by setting the 162<p> Otherwise, the handler forces a future context switch by setting the
@@ -298,19 +297,18 @@ Instead, the task pushing the grace period forward will include the
298idle CPUs in the mask passed to <tt>rcu_report_exp_cpu_mult()</tt>. 297idle CPUs in the mask passed to <tt>rcu_report_exp_cpu_mult()</tt>.
299 298
300<p> 299<p>
301For RCU-sched, there is an additional check for idle in the IPI 300For RCU-sched, there is an additional check:
302handler, <tt>sync_sched_exp_handler()</tt>.
303If the IPI has interrupted the idle loop, then 301If the IPI has interrupted the idle loop, then
304<tt>sync_sched_exp_handler()</tt> invokes <tt>rcu_report_exp_rdp()</tt> 302<tt>rcu_exp_handler()</tt> invokes <tt>rcu_report_exp_rdp()</tt>
305to report the corresponding quiescent state. 303to report the corresponding quiescent state.
306 304
307<p> 305<p>
308For RCU-preempt, there is no specific check for idle in the 306For RCU-preempt, there is no specific check for idle in the
309IPI handler (<tt>sync_rcu_exp_handler()</tt>), but because 307IPI handler (<tt>rcu_exp_handler()</tt>), but because
310RCU read-side critical sections are not permitted within the 308RCU read-side critical sections are not permitted within the
311idle loop, if <tt>sync_rcu_exp_handler()</tt> sees that the CPU is within 309idle loop, if <tt>rcu_exp_handler()</tt> sees that the CPU is within
312RCU read-side critical section, the CPU cannot possibly be idle. 310RCU read-side critical section, the CPU cannot possibly be idle.
313Otherwise, <tt>sync_rcu_exp_handler()</tt> invokes 311Otherwise, <tt>rcu_exp_handler()</tt> invokes
314<tt>rcu_report_exp_rdp()</tt> to report the corresponding quiescent 312<tt>rcu_report_exp_rdp()</tt> to report the corresponding quiescent
315state, regardless of whether or not that quiescent state was due to 313state, regardless of whether or not that quiescent state was due to
316the CPU being idle. 314the CPU being idle.
@@ -625,6 +623,8 @@ checks, but only during the mid-boot dead zone.
625<p> 623<p>
626With this refinement, synchronous grace periods can now be used from 624With this refinement, synchronous grace periods can now be used from
627task context pretty much any time during the life of the kernel. 625task context pretty much any time during the life of the kernel.
626That is, aside from some points in the suspend, hibernate, or shutdown
627code path.
628 628
629<h3><a name="Summary"> 629<h3><a name="Summary">
630Summary</a></h3> 630Summary</a></h3>
diff --git a/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html b/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html
index e4d94fba6c89..8d21af02b1f0 100644
--- a/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html
+++ b/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html
@@ -485,13 +485,13 @@ section that the grace period must wait on.
485noted by <tt>rcu_node_context_switch()</tt> on the left. 485noted by <tt>rcu_node_context_switch()</tt> on the left.
486On the other hand, if the CPU takes a scheduler-clock interrupt 486On the other hand, if the CPU takes a scheduler-clock interrupt
487while executing in usermode, a quiescent state will be noted by 487while executing in usermode, a quiescent state will be noted by
488<tt>rcu_check_callbacks()</tt> on the right. 488<tt>rcu_sched_clock_irq()</tt> on the right.
489Either way, the passage through a quiescent state will be noted 489Either way, the passage through a quiescent state will be noted
490in a per-CPU variable. 490in a per-CPU variable.
491 491
492<p>The next time an <tt>RCU_SOFTIRQ</tt> handler executes on 492<p>The next time an <tt>RCU_SOFTIRQ</tt> handler executes on
493this CPU (for example, after the next scheduler-clock 493this CPU (for example, after the next scheduler-clock
494interrupt), <tt>__rcu_process_callbacks()</tt> will invoke 494interrupt), <tt>rcu_core()</tt> will invoke
495<tt>rcu_check_quiescent_state()</tt>, which will notice the 495<tt>rcu_check_quiescent_state()</tt>, which will notice the
496recorded quiescent state, and invoke 496recorded quiescent state, and invoke
497<tt>rcu_report_qs_rdp()</tt>. 497<tt>rcu_report_qs_rdp()</tt>.
@@ -651,7 +651,7 @@ to end.
651These callbacks are identified by <tt>rcu_advance_cbs()</tt>, 651These callbacks are identified by <tt>rcu_advance_cbs()</tt>,
652which is usually invoked by <tt>__note_gp_changes()</tt>. 652which is usually invoked by <tt>__note_gp_changes()</tt>.
653As shown in the diagram below, this invocation can be triggered by 653As shown in the diagram below, this invocation can be triggered by
654the scheduling-clock interrupt (<tt>rcu_check_callbacks()</tt> on 654the scheduling-clock interrupt (<tt>rcu_sched_clock_irq()</tt> on
655the left) or by idle entry (<tt>rcu_cleanup_after_idle()</tt> on 655the left) or by idle entry (<tt>rcu_cleanup_after_idle()</tt> on
656the right, but only for kernels build with 656the right, but only for kernels build with
657<tt>CONFIG_RCU_FAST_NO_HZ=y</tt>). 657<tt>CONFIG_RCU_FAST_NO_HZ=y</tt>).
diff --git a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-callback-invocation.svg b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-callback-invocation.svg
index 832408313d93..3fcf0c17cef2 100644
--- a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-callback-invocation.svg
+++ b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-callback-invocation.svg
@@ -349,7 +349,7 @@
349 font-weight="bold" 349 font-weight="bold"
350 font-size="192" 350 font-size="192"
351 id="text202-7-5" 351 id="text202-7-5"
352 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_check_callbacks()</text> 352 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_sched_clock_irq()</text>
353 <rect 353 <rect
354 x="7069.6187" 354 x="7069.6187"
355 y="5087.4678" 355 y="5087.4678"
diff --git a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp.svg b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp.svg
index acd73c7ad0f4..2bcd742d6e49 100644
--- a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp.svg
+++ b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp.svg
@@ -3902,7 +3902,7 @@
3902 font-style="normal" 3902 font-style="normal"
3903 y="-4418.6582" 3903 y="-4418.6582"
3904 x="3745.7725" 3904 x="3745.7725"
3905 xml:space="preserve">rcu_check_callbacks()</text> 3905 xml:space="preserve">rcu_sched_clock_irq()</text>
3906 </g> 3906 </g>
3907 <g 3907 <g
3908 transform="translate(-850.30204,55463.106)" 3908 transform="translate(-850.30204,55463.106)"
@@ -3924,7 +3924,7 @@
3924 font-style="normal" 3924 font-style="normal"
3925 y="-4418.6582" 3925 y="-4418.6582"
3926 x="3745.7725" 3926 x="3745.7725"
3927 xml:space="preserve">rcu_process_callbacks()</text> 3927 xml:space="preserve">rcu_core()</text>
3928 <text 3928 <text
3929 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier" 3929 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
3930 id="text202-7-5-3-27-0" 3930 id="text202-7-5-3-27-0"
@@ -3933,7 +3933,7 @@
3933 font-style="normal" 3933 font-style="normal"
3934 y="-4165.7954" 3934 y="-4165.7954"
3935 x="3745.7725" 3935 x="3745.7725"
3936 xml:space="preserve">rcu_check_quiescent_state())</text> 3936 xml:space="preserve">rcu_check_quiescent_state()</text>
3937 <text 3937 <text
3938 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier" 3938 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
3939 id="text202-7-5-3-27-0-9" 3939 id="text202-7-5-3-27-0-9"
@@ -4968,7 +4968,7 @@
4968 font-weight="bold" 4968 font-weight="bold"
4969 font-size="192" 4969 font-size="192"
4970 id="text202-7-5-19" 4970 id="text202-7-5-19"
4971 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_check_callbacks()</text> 4971 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_sched_clock_irq()</text>
4972 <rect 4972 <rect
4973 x="5314.2671" 4973 x="5314.2671"
4974 y="82817.688" 4974 y="82817.688"
diff --git a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-qs.svg b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-qs.svg
index 149bec2a4493..779c9ac31a52 100644
--- a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-qs.svg
+++ b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-qs.svg
@@ -775,7 +775,7 @@
775 font-style="normal" 775 font-style="normal"
776 y="-4418.6582" 776 y="-4418.6582"
777 x="3745.7725" 777 x="3745.7725"
778 xml:space="preserve">rcu_check_callbacks()</text> 778 xml:space="preserve">rcu_sched_clock_irq()</text>
779 </g> 779 </g>
780 <g 780 <g
781 transform="translate(399.7744,828.86448)" 781 transform="translate(399.7744,828.86448)"
@@ -797,7 +797,7 @@
797 font-style="normal" 797 font-style="normal"
798 y="-4418.6582" 798 y="-4418.6582"
799 x="3745.7725" 799 x="3745.7725"
800 xml:space="preserve">rcu_process_callbacks()</text> 800 xml:space="preserve">rcu_core()</text>
801 <text 801 <text
802 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier" 802 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
803 id="text202-7-5-3-27-0" 803 id="text202-7-5-3-27-0"
@@ -806,7 +806,7 @@
806 font-style="normal" 806 font-style="normal"
807 y="-4165.7954" 807 y="-4165.7954"
808 x="3745.7725" 808 x="3745.7725"
809 xml:space="preserve">rcu_check_quiescent_state())</text> 809 xml:space="preserve">rcu_check_quiescent_state()</text>
810 <text 810 <text
811 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier" 811 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
812 id="text202-7-5-3-27-0-9" 812 id="text202-7-5-3-27-0-9"
diff --git a/Documentation/RCU/Design/Requirements/Requirements.html b/Documentation/RCU/Design/Requirements/Requirements.html
index 9fca73e03a98..5a9238a2883c 100644
--- a/Documentation/RCU/Design/Requirements/Requirements.html
+++ b/Documentation/RCU/Design/Requirements/Requirements.html
@@ -3099,7 +3099,7 @@ If you block forever in one of a given domain's SRCU read-side critical
3099sections, then that domain's grace periods will also be blocked forever. 3099sections, then that domain's grace periods will also be blocked forever.
3100Of course, one good way to block forever is to deadlock, which can 3100Of course, one good way to block forever is to deadlock, which can
3101happen if any operation in a given domain's SRCU read-side critical 3101happen if any operation in a given domain's SRCU read-side critical
3102section can block waiting, either directly or indirectly, for that domain's 3102section can wait, either directly or indirectly, for that domain's
3103grace period to elapse. 3103grace period to elapse.
3104For example, this results in a self-deadlock: 3104For example, this results in a self-deadlock:
3105 3105
@@ -3139,12 +3139,18 @@ API, which, in combination with <tt>srcu_read_unlock()</tt>,
3139guarantees a full memory barrier. 3139guarantees a full memory barrier.
3140 3140
3141<p> 3141<p>
3142Also unlike other RCU flavors, SRCU's callbacks-wait function 3142Also unlike other RCU flavors, <tt>synchronize_srcu()</tt> may <b>not</b>
3143<tt>srcu_barrier()</tt> may be invoked from CPU-hotplug notifiers, 3143be invoked from CPU-hotplug notifiers, due to the fact that SRCU grace
3144though this is not necessarily a good idea. 3144periods make use of timers and the possibility of timers being temporarily
3145The reason that this is possible is that SRCU is insensitive 3145&ldquo;stranded&rdquo; on the outgoing CPU.
3146to whether or not a CPU is online, which means that <tt>srcu_barrier()</tt> 3146This stranding of timers means that timers posted to the outgoing CPU
3147need not exclude CPU-hotplug operations. 3147will not fire until late in the CPU-hotplug process.
3148The problem is that if a notifier is waiting on an SRCU grace period,
3149that grace period is waiting on a timer, and that timer is stranded on the
3150outgoing CPU, then the notifier will never be awakened, in other words,
3151deadlock has occurred.
3152This same situation of course also prohibits <tt>srcu_barrier()</tt>
3153from being invoked from CPU-hotplug notifiers.
3148 3154
3149<p> 3155<p>
3150SRCU also differs from other RCU flavors in that SRCU's expedited and 3156SRCU also differs from other RCU flavors in that SRCU's expedited and
diff --git a/Documentation/RCU/stallwarn.txt b/Documentation/RCU/stallwarn.txt
index 073dbc12d1ea..1ab70c37921f 100644
--- a/Documentation/RCU/stallwarn.txt
+++ b/Documentation/RCU/stallwarn.txt
@@ -219,17 +219,18 @@ an estimate of the total number of RCU callbacks queued across all CPUs
219In kernels with CONFIG_RCU_FAST_NO_HZ, more information is printed 219In kernels with CONFIG_RCU_FAST_NO_HZ, more information is printed
220for each CPU: 220for each CPU:
221 221
222 0: (64628 ticks this GP) idle=dd5/3fffffffffffffff/0 softirq=82/543 last_accelerate: a345/d342 nonlazy_posted: 25 .D 222 0: (64628 ticks this GP) idle=dd5/3fffffffffffffff/0 softirq=82/543 last_accelerate: a345/d342 Nonlazy posted: ..D
223 223
224The "last_accelerate:" prints the low-order 16 bits (in hex) of the 224The "last_accelerate:" prints the low-order 16 bits (in hex) of the
225jiffies counter when this CPU last invoked rcu_try_advance_all_cbs() 225jiffies counter when this CPU last invoked rcu_try_advance_all_cbs()
226from rcu_needs_cpu() or last invoked rcu_accelerate_cbs() from 226from rcu_needs_cpu() or last invoked rcu_accelerate_cbs() from
227rcu_prepare_for_idle(). The "nonlazy_posted:" prints the number 227rcu_prepare_for_idle(). The "Nonlazy posted:" indicates lazy-callback
228of non-lazy callbacks posted since the last call to rcu_needs_cpu(). 228status, so that an "l" indicates that all callbacks were lazy at the start
229Finally, an "L" indicates that there are currently no non-lazy callbacks 229of the last idle period and an "L" indicates that there are currently
230("." is printed otherwise, as shown above) and "D" indicates that 230no non-lazy callbacks (in both cases, "." is printed otherwise, as
231dyntick-idle processing is enabled ("." is printed otherwise, for example, 231shown above) and "D" indicates that dyntick-idle processing is enabled
232if disabled via the "nohz=" kernel boot parameter). 232("." is printed otherwise, for example, if disabled via the "nohz="
233kernel boot parameter).
233 234
234If the grace period ends just as the stall warning starts printing, 235If the grace period ends just as the stall warning starts printing,
235there will be a spurious stall-warning message, which will include 236there will be a spurious stall-warning message, which will include
diff --git a/Documentation/RCU/torture.txt b/Documentation/RCU/torture.txt
index 55918b54808b..a41a0384d20c 100644
--- a/Documentation/RCU/torture.txt
+++ b/Documentation/RCU/torture.txt
@@ -10,173 +10,8 @@ status messages via printk(), which can be examined via the dmesg
10command (perhaps grepping for "torture"). The test is started 10command (perhaps grepping for "torture"). The test is started
11when the module is loaded, and stops when the module is unloaded. 11when the module is loaded, and stops when the module is unloaded.
12 12
13 13Module parameters are prefixed by "rcutorture." in
14MODULE PARAMETERS 14Documentation/admin-guide/kernel-parameters.txt.
15
16This module has the following parameters:
17
18fqs_duration Duration (in microseconds) of artificially induced bursts
19 of force_quiescent_state() invocations. In RCU
20 implementations having force_quiescent_state(), these
21 bursts help force races between forcing a given grace
22 period and that grace period ending on its own.
23
24fqs_holdoff Holdoff time (in microseconds) between consecutive calls
25 to force_quiescent_state() within a burst.
26
27fqs_stutter Wait time (in seconds) between consecutive bursts
28 of calls to force_quiescent_state().
29
30gp_normal Make the fake writers use normal synchronous grace-period
31 primitives.
32
33gp_exp Make the fake writers use expedited synchronous grace-period
34 primitives. If both gp_normal and gp_exp are set, or
35 if neither gp_normal nor gp_exp are set, then randomly
36 choose the primitive so that about 50% are normal and
37 50% expedited. By default, neither are set, which
38 gives best overall test coverage.
39
40irqreader Says to invoke RCU readers from irq level. This is currently
41 done via timers. Defaults to "1" for variants of RCU that
42 permit this. (Or, more accurately, variants of RCU that do
43 -not- permit this know to ignore this variable.)
44
45n_barrier_cbs If this is nonzero, RCU barrier testing will be conducted,
46 in which case n_barrier_cbs specifies the number of
47 RCU callbacks (and corresponding kthreads) to use for
48 this testing. The value cannot be negative. If you
49 specify this to be non-zero when torture_type indicates a
50 synchronous RCU implementation (one for which a member of
51 the synchronize_rcu() rather than the call_rcu() family is
52 used -- see the documentation for torture_type below), an
53 error will be reported and no testing will be carried out.
54
55nfakewriters This is the number of RCU fake writer threads to run. Fake
56 writer threads repeatedly use the synchronous "wait for
57 current readers" function of the interface selected by
58 torture_type, with a delay between calls to allow for various
59 different numbers of writers running in parallel.
60 nfakewriters defaults to 4, which provides enough parallelism
61 to trigger special cases caused by multiple writers, such as
62 the synchronize_srcu() early return optimization.
63
64nreaders This is the number of RCU reading threads supported.
65 The default is twice the number of CPUs. Why twice?
66 To properly exercise RCU implementations with preemptible
67 read-side critical sections.
68
69onoff_interval
70 The number of seconds between each attempt to execute a
71 randomly selected CPU-hotplug operation. Defaults to
72 zero, which disables CPU hotplugging. In HOTPLUG_CPU=n
73 kernels, rcutorture will silently refuse to do any
74 CPU-hotplug operations regardless of what value is
75 specified for onoff_interval.
76
77onoff_holdoff The number of seconds to wait until starting CPU-hotplug
78 operations. This would normally only be used when
79 rcutorture was built into the kernel and started
80 automatically at boot time, in which case it is useful
81 in order to avoid confusing boot-time code with CPUs
82 coming and going.
83
84shuffle_interval
85 The number of seconds to keep the test threads affinitied
86 to a particular subset of the CPUs, defaults to 3 seconds.
87 Used in conjunction with test_no_idle_hz.
88
89shutdown_secs The number of seconds to run the test before terminating
90 the test and powering off the system. The default is
91 zero, which disables test termination and system shutdown.
92 This capability is useful for automated testing.
93
94stall_cpu The number of seconds that a CPU should be stalled while
95 within both an rcu_read_lock() and a preempt_disable().
96 This stall happens only once per rcutorture run.
97 If you need multiple stalls, use modprobe and rmmod to
98 repeatedly run rcutorture. The default for stall_cpu
99 is zero, which prevents rcutorture from stalling a CPU.
100
101 Note that attempts to rmmod rcutorture while the stall
102 is ongoing will hang, so be careful what value you
103 choose for this module parameter! In addition, too-large
104 values for stall_cpu might well induce failures and
105 warnings in other parts of the kernel. You have been
106 warned!
107
108stall_cpu_holdoff
109 The number of seconds to wait after rcutorture starts
110 before stalling a CPU. Defaults to 10 seconds.
111
112stat_interval The number of seconds between output of torture
113 statistics (via printk()). Regardless of the interval,
114 statistics are printed when the module is unloaded.
115 Setting the interval to zero causes the statistics to
116 be printed -only- when the module is unloaded, and this
117 is the default.
118
119stutter The length of time to run the test before pausing for this
120 same period of time. Defaults to "stutter=5", so as
121 to run and pause for (roughly) five-second intervals.
122 Specifying "stutter=0" causes the test to run continuously
123 without pausing, which is the old default behavior.
124
125test_boost Whether or not to test the ability of RCU to do priority
126 boosting. Defaults to "test_boost=1", which performs
127 RCU priority-inversion testing only if the selected
128 RCU implementation supports priority boosting. Specifying
129 "test_boost=0" never performs RCU priority-inversion
130 testing. Specifying "test_boost=2" performs RCU
131 priority-inversion testing even if the selected RCU
132 implementation does not support RCU priority boosting,
133 which can be used to test rcutorture's ability to
134 carry out RCU priority-inversion testing.
135
136test_boost_interval
137 The number of seconds in an RCU priority-inversion test
138 cycle. Defaults to "test_boost_interval=7". It is
139 usually wise for this value to be relatively prime to
140 the value selected for "stutter".
141
142test_boost_duration
143 The number of seconds to do RCU priority-inversion testing
144 within any given "test_boost_interval". Defaults to
145 "test_boost_duration=4".
146
147test_no_idle_hz Whether or not to test the ability of RCU to operate in
148 a kernel that disables the scheduling-clock interrupt to
149 idle CPUs. Boolean parameter, "1" to test, "0" otherwise.
150 Defaults to omitting this test.
151
152torture_type The type of RCU to test, with string values as follows:
153
154 "rcu": rcu_read_lock(), rcu_read_unlock() and call_rcu(),
155 along with expedited, synchronous, and polling
156 variants.
157
158 "rcu_bh": rcu_read_lock_bh(), rcu_read_unlock_bh(), and
159 call_rcu_bh(), along with expedited and synchronous
160 variants.
161
162 "rcu_busted": This tests an intentionally incorrect version
163 of RCU in order to help test rcutorture itself.
164
165 "srcu": srcu_read_lock(), srcu_read_unlock() and
166 call_srcu(), along with expedited and
167 synchronous variants.
168
169 "sched": preempt_disable(), preempt_enable(), and
170 call_rcu_sched(), along with expedited,
171 synchronous, and polling variants.
172
173 "tasks": voluntary context switch and call_rcu_tasks(),
174 along with expedited and synchronous variants.
175
176 Defaults to "rcu".
177
178verbose Enable debug printk()s. Default is disabled.
179
180 15
181OUTPUT 16OUTPUT
182 17
diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt
index 4a6854318b17..1ace20815bb1 100644
--- a/Documentation/RCU/whatisRCU.txt
+++ b/Documentation/RCU/whatisRCU.txt
@@ -302,7 +302,7 @@ rcu_dereference()
302 must prohibit. The rcu_dereference_protected() variant takes 302 must prohibit. The rcu_dereference_protected() variant takes
303 a lockdep expression to indicate which locks must be acquired 303 a lockdep expression to indicate which locks must be acquired
304 by the caller. If the indicated protection is not provided, 304 by the caller. If the indicated protection is not provided,
305 a lockdep splat is emitted. See RCU/Design/Requirements.html 305 a lockdep splat is emitted. See RCU/Design/Requirements/Requirements.html
306 and the API's code comments for more details and example usage. 306 and the API's code comments for more details and example usage.
307 307
308The following diagram shows how each API communicates among the 308The following diagram shows how each API communicates among the
@@ -560,7 +560,7 @@ presents two such "toy" implementations of RCU, one that is implemented
560in terms of familiar locking primitives, and another that more closely 560in terms of familiar locking primitives, and another that more closely
561resembles "classic" RCU. Both are way too simple for real-world use, 561resembles "classic" RCU. Both are way too simple for real-world use,
562lacking both functionality and performance. However, they are useful 562lacking both functionality and performance. However, they are useful
563in getting a feel for how RCU works. See kernel/rcupdate.c for a 563in getting a feel for how RCU works. See kernel/rcu/update.c for a
564production-quality implementation, and see: 564production-quality implementation, and see:
565 565
566 http://www.rdrop.com/users/paulmck/RCU 566 http://www.rdrop.com/users/paulmck/RCU
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 858b6c0b9a15..28481510ad4e 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -3653,19 +3653,6 @@
3653 latencies, which will choose a value aligned 3653 latencies, which will choose a value aligned
3654 with the appropriate hardware boundaries. 3654 with the appropriate hardware boundaries.
3655 3655
3656 rcutree.jiffies_till_sched_qs= [KNL]
3657 Set required age in jiffies for a
3658 given grace period before RCU starts
3659 soliciting quiescent-state help from
3660 rcu_note_context_switch(). If not specified, the
3661 kernel will calculate a value based on the most
3662 recent settings of rcutree.jiffies_till_first_fqs
3663 and rcutree.jiffies_till_next_fqs.
3664 This calculated value may be viewed in
3665 rcutree.jiffies_to_sched_qs. Any attempt to
3666 set rcutree.jiffies_to_sched_qs will be
3667 cheerfully overwritten.
3668
3669 rcutree.jiffies_till_first_fqs= [KNL] 3656 rcutree.jiffies_till_first_fqs= [KNL]
3670 Set delay from grace-period initialization to 3657 Set delay from grace-period initialization to
3671 first attempt to force quiescent states. 3658 first attempt to force quiescent states.
@@ -3677,6 +3664,20 @@
3677 quiescent states. Units are jiffies, minimum 3664 quiescent states. Units are jiffies, minimum
3678 value is one, and maximum value is HZ. 3665 value is one, and maximum value is HZ.
3679 3666
3667 rcutree.jiffies_till_sched_qs= [KNL]
3668 Set required age in jiffies for a
3669 given grace period before RCU starts
3670 soliciting quiescent-state help from
3671 rcu_note_context_switch() and cond_resched().
3672 If not specified, the kernel will calculate
3673 a value based on the most recent settings
3674 of rcutree.jiffies_till_first_fqs
3675 and rcutree.jiffies_till_next_fqs.
3676 This calculated value may be viewed in
3677 rcutree.jiffies_to_sched_qs. Any attempt to set
3678 rcutree.jiffies_to_sched_qs will be cheerfully
3679 overwritten.
3680
3680 rcutree.kthread_prio= [KNL,BOOT] 3681 rcutree.kthread_prio= [KNL,BOOT]
3681 Set the SCHED_FIFO priority of the RCU per-CPU 3682 Set the SCHED_FIFO priority of the RCU per-CPU
3682 kthreads (rcuc/N). This value is also used for 3683 kthreads (rcuc/N). This value is also used for
@@ -3720,6 +3721,11 @@
3720 This wake_up() will be accompanied by a 3721 This wake_up() will be accompanied by a
3721 WARN_ONCE() splat and an ftrace_dump(). 3722 WARN_ONCE() splat and an ftrace_dump().
3722 3723
3724 rcutree.sysrq_rcu= [KNL]
3725 Commandeer a sysrq key to dump out Tree RCU's
3726 rcu_node tree with an eye towards determining
3727 why a new grace period has not yet started.
3728
3723 rcuperf.gp_async= [KNL] 3729 rcuperf.gp_async= [KNL]
3724 Measure performance of asynchronous 3730 Measure performance of asynchronous
3725 grace-period primitives such as call_rcu(). 3731 grace-period primitives such as call_rcu().
diff --git a/MAINTAINERS b/MAINTAINERS
index 9919840d54cd..65217477b036 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -10797,6 +10797,12 @@ F: drivers/power/supply/bq27xxx_battery_i2c.c
10797F: drivers/power/supply/isp1704_charger.c 10797F: drivers/power/supply/isp1704_charger.c
10798F: drivers/power/supply/rx51_battery.c 10798F: drivers/power/supply/rx51_battery.c
10799 10799
10800NOLIBC HEADER FILE
10801M: Willy Tarreau <w@1wt.eu>
10802S: Maintained
10803T: git git://git.kernel.org/pub/scm/linux/kernel/git/wtarreau/nolibc.git
10804F: tools/include/nolibc/
10805
10800NTB AMD DRIVER 10806NTB AMD DRIVER
10801M: Shyam Sundar S K <Shyam-sundar.S-k@amd.com> 10807M: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
10802L: linux-ntb@googlegroups.com 10808L: linux-ntb@googlegroups.com
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 20561a60db9c..0e9bd9c83870 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -211,9 +211,6 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
211/* 211/*
212 * WARN_ON_SMP() is for cases that the warning is either 212 * WARN_ON_SMP() is for cases that the warning is either
213 * meaningless for !SMP or may even cause failures. 213 * meaningless for !SMP or may even cause failures.
214 * This is usually used for cases that we have
215 * WARN_ON(!spin_is_locked(&lock)) checks, as spin_is_locked()
216 * returns 0 for uniprocessor settings.
217 * It can also be used with values that are only defined 214 * It can also be used with values that are only defined
218 * on SMP: 215 * on SMP:
219 * 216 *
diff --git a/include/linux/rcu_node_tree.h b/include/linux/rcu_node_tree.h
index 426cee67f0e2..b8e094b125ee 100644
--- a/include/linux/rcu_node_tree.h
+++ b/include/linux/rcu_node_tree.h
@@ -1,3 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
1/* 2/*
2 * RCU node combining tree definitions. These are used to compute 3 * RCU node combining tree definitions. These are used to compute
3 * global attributes while avoiding common-case global contention. A key 4 * global attributes while avoiding common-case global contention. A key
@@ -11,23 +12,9 @@
11 * because the size of the TREE SRCU srcu_struct structure depends 12 * because the size of the TREE SRCU srcu_struct structure depends
12 * on these definitions. 13 * on these definitions.
13 * 14 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2 of the License, or
17 * (at your option) any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, you can access it online at
26 * http://www.gnu.org/licenses/gpl-2.0.html.
27 *
28 * Copyright IBM Corporation, 2017 15 * Copyright IBM Corporation, 2017
29 * 16 *
30 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com> 17 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
31 */ 18 */
32 19
33#ifndef __LINUX_RCU_NODE_TREE_H 20#ifndef __LINUX_RCU_NODE_TREE_H
diff --git a/include/linux/rcu_segcblist.h b/include/linux/rcu_segcblist.h
index c3ad00e63556..87404cb015f1 100644
--- a/include/linux/rcu_segcblist.h
+++ b/include/linux/rcu_segcblist.h
@@ -1,3 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
1/* 2/*
2 * RCU segmented callback lists 3 * RCU segmented callback lists
3 * 4 *
@@ -5,23 +6,9 @@
5 * because the size of the TREE SRCU srcu_struct structure depends 6 * because the size of the TREE SRCU srcu_struct structure depends
6 * on these definitions. 7 * on these definitions.
7 * 8 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, you can access it online at
20 * http://www.gnu.org/licenses/gpl-2.0.html.
21 *
22 * Copyright IBM Corporation, 2017 9 * Copyright IBM Corporation, 2017
23 * 10 *
 24 * Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com> 11 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
25 */ 12 */
26 13
27#ifndef __INCLUDE_LINUX_RCU_SEGCBLIST_H 14#ifndef __INCLUDE_LINUX_RCU_SEGCBLIST_H
diff --git a/include/linux/rcu_sync.h b/include/linux/rcu_sync.h
index ece7ed9a4a70..6fc53a1345b3 100644
--- a/include/linux/rcu_sync.h
+++ b/include/linux/rcu_sync.h
@@ -1,20 +1,7 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
1/* 2/*
2 * RCU-based infrastructure for lightweight reader-writer locking 3 * RCU-based infrastructure for lightweight reader-writer locking
3 * 4 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, you can access it online at
16 * http://www.gnu.org/licenses/gpl-2.0.html.
17 *
18 * Copyright (c) 2015, Red Hat, Inc. 5 * Copyright (c) 2015, Red Hat, Inc.
19 * 6 *
20 * Author: Oleg Nesterov <oleg@redhat.com> 7 * Author: Oleg Nesterov <oleg@redhat.com>
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 4db8bcacc51a..6cdb1db776cf 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -1,25 +1,12 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
1/* 2/*
2 * Read-Copy Update mechanism for mutual exclusion 3 * Read-Copy Update mechanism for mutual exclusion
3 * 4 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, you can access it online at
16 * http://www.gnu.org/licenses/gpl-2.0.html.
17 *
18 * Copyright IBM Corporation, 2001 5 * Copyright IBM Corporation, 2001
19 * 6 *
20 * Author: Dipankar Sarma <dipankar@in.ibm.com> 7 * Author: Dipankar Sarma <dipankar@in.ibm.com>
21 * 8 *
22 * Based on the original work by Paul McKenney <paulmck@us.ibm.com> 9 * Based on the original work by Paul McKenney <paulmck@vnet.ibm.com>
23 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. 10 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
24 * Papers: 11 * Papers:
25 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf 12 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
@@ -89,7 +76,7 @@ static inline int rcu_preempt_depth(void)
89/* Internal to kernel */ 76/* Internal to kernel */
90void rcu_init(void); 77void rcu_init(void);
91extern int rcu_scheduler_active __read_mostly; 78extern int rcu_scheduler_active __read_mostly;
92void rcu_check_callbacks(int user); 79void rcu_sched_clock_irq(int user);
93void rcu_report_dead(unsigned int cpu); 80void rcu_report_dead(unsigned int cpu);
94void rcutree_migrate_callbacks(int cpu); 81void rcutree_migrate_callbacks(int cpu);
95 82
@@ -309,16 +296,16 @@ static inline void rcu_preempt_sleep_check(void) { }
309 */ 296 */
310 297
311#ifdef __CHECKER__ 298#ifdef __CHECKER__
312#define rcu_dereference_sparse(p, space) \ 299#define rcu_check_sparse(p, space) \
313 ((void)(((typeof(*p) space *)p) == p)) 300 ((void)(((typeof(*p) space *)p) == p))
314#else /* #ifdef __CHECKER__ */ 301#else /* #ifdef __CHECKER__ */
315#define rcu_dereference_sparse(p, space) 302#define rcu_check_sparse(p, space)
316#endif /* #else #ifdef __CHECKER__ */ 303#endif /* #else #ifdef __CHECKER__ */
317 304
318#define __rcu_access_pointer(p, space) \ 305#define __rcu_access_pointer(p, space) \
319({ \ 306({ \
320 typeof(*p) *_________p1 = (typeof(*p) *__force)READ_ONCE(p); \ 307 typeof(*p) *_________p1 = (typeof(*p) *__force)READ_ONCE(p); \
321 rcu_dereference_sparse(p, space); \ 308 rcu_check_sparse(p, space); \
322 ((typeof(*p) __force __kernel *)(_________p1)); \ 309 ((typeof(*p) __force __kernel *)(_________p1)); \
323}) 310})
324#define __rcu_dereference_check(p, c, space) \ 311#define __rcu_dereference_check(p, c, space) \
@@ -326,13 +313,13 @@ static inline void rcu_preempt_sleep_check(void) { }
326 /* Dependency order vs. p above. */ \ 313 /* Dependency order vs. p above. */ \
327 typeof(*p) *________p1 = (typeof(*p) *__force)READ_ONCE(p); \ 314 typeof(*p) *________p1 = (typeof(*p) *__force)READ_ONCE(p); \
328 RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \ 315 RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \
329 rcu_dereference_sparse(p, space); \ 316 rcu_check_sparse(p, space); \
330 ((typeof(*p) __force __kernel *)(________p1)); \ 317 ((typeof(*p) __force __kernel *)(________p1)); \
331}) 318})
332#define __rcu_dereference_protected(p, c, space) \ 319#define __rcu_dereference_protected(p, c, space) \
333({ \ 320({ \
334 RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_protected() usage"); \ 321 RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_protected() usage"); \
335 rcu_dereference_sparse(p, space); \ 322 rcu_check_sparse(p, space); \
336 ((typeof(*p) __force __kernel *)(p)); \ 323 ((typeof(*p) __force __kernel *)(p)); \
337}) 324})
338#define rcu_dereference_raw(p) \ 325#define rcu_dereference_raw(p) \
@@ -382,6 +369,7 @@ static inline void rcu_preempt_sleep_check(void) { }
382#define rcu_assign_pointer(p, v) \ 369#define rcu_assign_pointer(p, v) \
383({ \ 370({ \
384 uintptr_t _r_a_p__v = (uintptr_t)(v); \ 371 uintptr_t _r_a_p__v = (uintptr_t)(v); \
372 rcu_check_sparse(p, __rcu); \
385 \ 373 \
386 if (__builtin_constant_p(v) && (_r_a_p__v) == (uintptr_t)NULL) \ 374 if (__builtin_constant_p(v) && (_r_a_p__v) == (uintptr_t)NULL) \
387 WRITE_ONCE((p), (typeof(p))(_r_a_p__v)); \ 375 WRITE_ONCE((p), (typeof(p))(_r_a_p__v)); \
@@ -785,7 +773,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
785 */ 773 */
786#define RCU_INIT_POINTER(p, v) \ 774#define RCU_INIT_POINTER(p, v) \
787 do { \ 775 do { \
788 rcu_dereference_sparse(p, __rcu); \ 776 rcu_check_sparse(p, __rcu); \
789 WRITE_ONCE(p, RCU_INITIALIZER(v)); \ 777 WRITE_ONCE(p, RCU_INITIALIZER(v)); \
790 } while (0) 778 } while (0)
791 779
@@ -859,7 +847,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
859 847
860/* Has the specified rcu_head structure been handed to call_rcu()? */ 848/* Has the specified rcu_head structure been handed to call_rcu()? */
861 849
862/* 850/**
863 * rcu_head_init - Initialize rcu_head for rcu_head_after_call_rcu() 851 * rcu_head_init - Initialize rcu_head for rcu_head_after_call_rcu()
864 * @rhp: The rcu_head structure to initialize. 852 * @rhp: The rcu_head structure to initialize.
865 * 853 *
@@ -874,10 +862,10 @@ static inline void rcu_head_init(struct rcu_head *rhp)
874 rhp->func = (rcu_callback_t)~0L; 862 rhp->func = (rcu_callback_t)~0L;
875} 863}
876 864
877/* 865/**
878 * rcu_head_after_call_rcu - Has this rcu_head been passed to call_rcu()? 866 * rcu_head_after_call_rcu - Has this rcu_head been passed to call_rcu()?
879 * @rhp: The rcu_head structure to test. 867 * @rhp: The rcu_head structure to test.
880 * @func: The function passed to call_rcu() along with @rhp. 868 * @f: The function passed to call_rcu() along with @rhp.
881 * 869 *
882 * Returns @true if the @rhp has been passed to call_rcu() with @func, 870 * Returns @true if the @rhp has been passed to call_rcu() with @func,
883 * and @false otherwise. Emits a warning in any other case, including 871 * and @false otherwise. Emits a warning in any other case, including
@@ -896,57 +884,4 @@ rcu_head_after_call_rcu(struct rcu_head *rhp, rcu_callback_t f)
896 return false; 884 return false;
897} 885}
898 886
899
900/* Transitional pre-consolidation compatibility definitions. */
901
902static inline void synchronize_rcu_bh(void)
903{
904 synchronize_rcu();
905}
906
907static inline void synchronize_rcu_bh_expedited(void)
908{
909 synchronize_rcu_expedited();
910}
911
912static inline void call_rcu_bh(struct rcu_head *head, rcu_callback_t func)
913{
914 call_rcu(head, func);
915}
916
917static inline void rcu_barrier_bh(void)
918{
919 rcu_barrier();
920}
921
922static inline void synchronize_sched(void)
923{
924 synchronize_rcu();
925}
926
927static inline void synchronize_sched_expedited(void)
928{
929 synchronize_rcu_expedited();
930}
931
932static inline void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
933{
934 call_rcu(head, func);
935}
936
937static inline void rcu_barrier_sched(void)
938{
939 rcu_barrier();
940}
941
942static inline unsigned long get_state_synchronize_sched(void)
943{
944 return get_state_synchronize_rcu();
945}
946
947static inline void cond_synchronize_sched(unsigned long oldstate)
948{
949 cond_synchronize_rcu(oldstate);
950}
951
952#endif /* __LINUX_RCUPDATE_H */ 887#endif /* __LINUX_RCUPDATE_H */
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index af65d1f36ddb..8e727f57d814 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -1,23 +1,10 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
1/* 2/*
2 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition. 3 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
3 * 4 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, you can access it online at
16 * http://www.gnu.org/licenses/gpl-2.0.html.
17 *
18 * Copyright IBM Corporation, 2008 5 * Copyright IBM Corporation, 2008
19 * 6 *
20 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com> 7 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
21 * 8 *
22 * For detailed explanation of Read-Copy Update mechanism see - 9 * For detailed explanation of Read-Copy Update mechanism see -
23 * Documentation/RCU 10 * Documentation/RCU
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 7f83179177d1..735601ac27d3 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -1,26 +1,13 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
1/* 2/*
2 * Read-Copy Update mechanism for mutual exclusion (tree-based version) 3 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
3 * 4 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, you can access it online at
16 * http://www.gnu.org/licenses/gpl-2.0.html.
17 *
18 * Copyright IBM Corporation, 2008 5 * Copyright IBM Corporation, 2008
19 * 6 *
20 * Author: Dipankar Sarma <dipankar@in.ibm.com> 7 * Author: Dipankar Sarma <dipankar@in.ibm.com>
21 * Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical algorithm 8 * Paul E. McKenney <paulmck@linux.ibm.com> Hierarchical algorithm
22 * 9 *
23 * Based on the original work by Paul McKenney <paulmck@us.ibm.com> 10 * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
24 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. 11 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
25 * 12 *
26 * For detailed explanation of Read-Copy Update mechanism see - 13 * For detailed explanation of Read-Copy Update mechanism see -
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index c614375cd264..c495b2d51569 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -1,24 +1,11 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
1/* 2/*
2 * Sleepable Read-Copy Update mechanism for mutual exclusion 3 * Sleepable Read-Copy Update mechanism for mutual exclusion
3 * 4 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, you can access it online at
16 * http://www.gnu.org/licenses/gpl-2.0.html.
17 *
18 * Copyright (C) IBM Corporation, 2006 5 * Copyright (C) IBM Corporation, 2006
19 * Copyright (C) Fujitsu, 2012 6 * Copyright (C) Fujitsu, 2012
20 * 7 *
21 * Author: Paul McKenney <paulmck@us.ibm.com> 8 * Author: Paul McKenney <paulmck@linux.ibm.com>
22 * Lai Jiangshan <laijs@cn.fujitsu.com> 9 * Lai Jiangshan <laijs@cn.fujitsu.com>
23 * 10 *
24 * For detailed explanation of Read-Copy Update mechanism see - 11 * For detailed explanation of Read-Copy Update mechanism see -
@@ -223,6 +210,7 @@ srcu_read_lock_notrace(struct srcu_struct *ssp) __acquires(ssp)
223static inline void srcu_read_unlock(struct srcu_struct *ssp, int idx) 210static inline void srcu_read_unlock(struct srcu_struct *ssp, int idx)
224 __releases(ssp) 211 __releases(ssp)
225{ 212{
213 WARN_ON_ONCE(idx & ~0x1);
226 rcu_lock_release(&(ssp)->dep_map); 214 rcu_lock_release(&(ssp)->dep_map);
227 __srcu_read_unlock(ssp, idx); 215 __srcu_read_unlock(ssp, idx);
228} 216}
diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h
index b19216aaaef2..5a5a1941ca15 100644
--- a/include/linux/srcutiny.h
+++ b/include/linux/srcutiny.h
@@ -1,24 +1,11 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
1/* 2/*
2 * Sleepable Read-Copy Update mechanism for mutual exclusion, 3 * Sleepable Read-Copy Update mechanism for mutual exclusion,
3 * tiny variant. 4 * tiny variant.
4 * 5 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, you can access it online at
17 * http://www.gnu.org/licenses/gpl-2.0.html.
18 *
19 * Copyright (C) IBM Corporation, 2017 6 * Copyright (C) IBM Corporation, 2017
20 * 7 *
21 * Author: Paul McKenney <paulmck@us.ibm.com> 8 * Author: Paul McKenney <paulmck@linux.ibm.com>
22 */ 9 */
23 10
24#ifndef _LINUX_SRCU_TINY_H 11#ifndef _LINUX_SRCU_TINY_H
diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h
index 6f292bd3e7db..7f7c8c050f63 100644
--- a/include/linux/srcutree.h
+++ b/include/linux/srcutree.h
@@ -1,24 +1,11 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
1/* 2/*
2 * Sleepable Read-Copy Update mechanism for mutual exclusion, 3 * Sleepable Read-Copy Update mechanism for mutual exclusion,
3 * tree variant. 4 * tree variant.
4 * 5 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, you can access it online at
17 * http://www.gnu.org/licenses/gpl-2.0.html.
18 *
19 * Copyright (C) IBM Corporation, 2017 6 * Copyright (C) IBM Corporation, 2017
20 * 7 *
21 * Author: Paul McKenney <paulmck@us.ibm.com> 8 * Author: Paul McKenney <paulmck@linux.ibm.com>
22 */ 9 */
23 10
24#ifndef _LINUX_SRCU_TREE_H 11#ifndef _LINUX_SRCU_TREE_H
@@ -45,7 +32,8 @@ struct srcu_data {
45 unsigned long srcu_gp_seq_needed; /* Furthest future GP needed. */ 32 unsigned long srcu_gp_seq_needed; /* Furthest future GP needed. */
46 unsigned long srcu_gp_seq_needed_exp; /* Furthest future exp GP. */ 33 unsigned long srcu_gp_seq_needed_exp; /* Furthest future exp GP. */
47 bool srcu_cblist_invoking; /* Invoking these CBs? */ 34 bool srcu_cblist_invoking; /* Invoking these CBs? */
48 struct delayed_work work; /* Context for CB invoking. */ 35 struct timer_list delay_work; /* Delay for CB invoking */
36 struct work_struct work; /* Context for CB invoking. */
49 struct rcu_head srcu_barrier_head; /* For srcu_barrier() use. */ 37 struct rcu_head srcu_barrier_head; /* For srcu_barrier() use. */
50 struct srcu_node *mynode; /* Leaf srcu_node. */ 38 struct srcu_node *mynode; /* Leaf srcu_node. */
51 unsigned long grpmask; /* Mask for leaf srcu_node */ 39 unsigned long grpmask; /* Mask for leaf srcu_node */
diff --git a/include/linux/torture.h b/include/linux/torture.h
index 48fad21109fc..23d80db426d7 100644
--- a/include/linux/torture.h
+++ b/include/linux/torture.h
@@ -1,23 +1,10 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
1/* 2/*
2 * Common functions for in-kernel torture tests. 3 * Common functions for in-kernel torture tests.
3 * 4 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, you can access it online at
16 * http://www.gnu.org/licenses/gpl-2.0.html.
17 *
18 * Copyright IBM Corporation, 2014 5 * Copyright IBM Corporation, 2014
19 * 6 *
20 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com> 7 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
21 */ 8 */
22 9
23#ifndef __LINUX_TORTURE_H 10#ifndef __LINUX_TORTURE_H
@@ -50,11 +37,12 @@
50 do { if (verbose) pr_alert("%s" TORTURE_FLAG "!!! %s\n", torture_type, s); } while (0) 37 do { if (verbose) pr_alert("%s" TORTURE_FLAG "!!! %s\n", torture_type, s); } while (0)
51 38
52/* Definitions for online/offline exerciser. */ 39/* Definitions for online/offline exerciser. */
40typedef void torture_ofl_func(void);
53bool torture_offline(int cpu, long *n_onl_attempts, long *n_onl_successes, 41bool torture_offline(int cpu, long *n_onl_attempts, long *n_onl_successes,
54 unsigned long *sum_offl, int *min_onl, int *max_onl); 42 unsigned long *sum_offl, int *min_onl, int *max_onl);
55bool torture_online(int cpu, long *n_onl_attempts, long *n_onl_successes, 43bool torture_online(int cpu, long *n_onl_attempts, long *n_onl_successes,
56 unsigned long *sum_onl, int *min_onl, int *max_onl); 44 unsigned long *sum_onl, int *min_onl, int *max_onl);
57int torture_onoff_init(long ooholdoff, long oointerval); 45int torture_onoff_init(long ooholdoff, long oointerval, torture_ofl_func *f);
58void torture_onoff_stats(void); 46void torture_onoff_stats(void);
59bool torture_onoff_failures(void); 47bool torture_onoff_failures(void);
60 48
diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
index 7d0b0ed74404..ad40a2617063 100644
--- a/kernel/locking/locktorture.c
+++ b/kernel/locking/locktorture.c
@@ -1,23 +1,10 @@
1// SPDX-License-Identifier: GPL-2.0+
1/* 2/*
2 * Module-based torture test facility for locking 3 * Module-based torture test facility for locking
3 * 4 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, you can access it online at
16 * http://www.gnu.org/licenses/gpl-2.0.html.
17 *
18 * Copyright (C) IBM Corporation, 2014 5 * Copyright (C) IBM Corporation, 2014
19 * 6 *
20 * Authors: Paul E. McKenney <paulmck@us.ibm.com> 7 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
21 * Davidlohr Bueso <dave@stgolabs.net> 8 * Davidlohr Bueso <dave@stgolabs.net>
22 * Based on kernel/rcu/torture.c. 9 * Based on kernel/rcu/torture.c.
23 */ 10 */
@@ -45,7 +32,7 @@
45#include <linux/torture.h> 32#include <linux/torture.h>
46 33
47MODULE_LICENSE("GPL"); 34MODULE_LICENSE("GPL");
48MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com>"); 35MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");
49 36
50torture_param(int, nwriters_stress, -1, 37torture_param(int, nwriters_stress, -1,
51 "Number of write-locking stress-test threads"); 38 "Number of write-locking stress-test threads");
@@ -970,7 +957,7 @@ static int __init lock_torture_init(void)
970 /* Prepare torture context. */ 957 /* Prepare torture context. */
971 if (onoff_interval > 0) { 958 if (onoff_interval > 0) {
972 firsterr = torture_onoff_init(onoff_holdoff * HZ, 959 firsterr = torture_onoff_init(onoff_holdoff * HZ,
973 onoff_interval * HZ); 960 onoff_interval * HZ, NULL);
974 if (firsterr) 961 if (firsterr)
975 goto unwind; 962 goto unwind;
976 } 963 }
diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index a393e24a9195..acee72c0b24b 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -1,23 +1,10 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
1/* 2/*
2 * Read-Copy Update definitions shared among RCU implementations. 3 * Read-Copy Update definitions shared among RCU implementations.
3 * 4 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, you can access it online at
16 * http://www.gnu.org/licenses/gpl-2.0.html.
17 *
18 * Copyright IBM Corporation, 2011 5 * Copyright IBM Corporation, 2011
19 * 6 *
20 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com> 7 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
21 */ 8 */
22 9
23#ifndef __LINUX_RCU_H 10#ifndef __LINUX_RCU_H
@@ -30,7 +17,7 @@
30#define RCU_TRACE(stmt) 17#define RCU_TRACE(stmt)
31#endif /* #else #ifdef CONFIG_RCU_TRACE */ 18#endif /* #else #ifdef CONFIG_RCU_TRACE */
32 19
33/* Offset to allow for unmatched rcu_irq_{enter,exit}(). */ 20/* Offset to allow distinguishing irq vs. task-based idle entry/exit. */
34#define DYNTICK_IRQ_NONIDLE ((LONG_MAX / 2) + 1) 21#define DYNTICK_IRQ_NONIDLE ((LONG_MAX / 2) + 1)
35 22
36 23
@@ -462,8 +449,6 @@ void rcu_request_urgent_qs_task(struct task_struct *t);
462 449
463enum rcutorture_type { 450enum rcutorture_type {
464 RCU_FLAVOR, 451 RCU_FLAVOR,
465 RCU_BH_FLAVOR,
466 RCU_SCHED_FLAVOR,
467 RCU_TASKS_FLAVOR, 452 RCU_TASKS_FLAVOR,
468 SRCU_FLAVOR, 453 SRCU_FLAVOR,
469 INVALID_RCU_FLAVOR 454 INVALID_RCU_FLAVOR
diff --git a/kernel/rcu/rcu_segcblist.c b/kernel/rcu/rcu_segcblist.c
index 5aff271adf1e..9bd5f6023c21 100644
--- a/kernel/rcu/rcu_segcblist.c
+++ b/kernel/rcu/rcu_segcblist.c
@@ -1,23 +1,10 @@
1// SPDX-License-Identifier: GPL-2.0+
1/* 2/*
2 * RCU segmented callback lists, function definitions 3 * RCU segmented callback lists, function definitions
3 * 4 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, you can access it online at
16 * http://www.gnu.org/licenses/gpl-2.0.html.
17 *
18 * Copyright IBM Corporation, 2017 5 * Copyright IBM Corporation, 2017
19 * 6 *
20 * Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com> 7 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
21 */ 8 */
22 9
23#include <linux/types.h> 10#include <linux/types.h>
diff --git a/kernel/rcu/rcu_segcblist.h b/kernel/rcu/rcu_segcblist.h
index 948470cef385..71b64648464e 100644
--- a/kernel/rcu/rcu_segcblist.h
+++ b/kernel/rcu/rcu_segcblist.h
@@ -1,23 +1,10 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
1/* 2/*
2 * RCU segmented callback lists, internal-to-rcu header file 3 * RCU segmented callback lists, internal-to-rcu header file
3 * 4 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, you can access it online at
16 * http://www.gnu.org/licenses/gpl-2.0.html.
17 *
18 * Copyright IBM Corporation, 2017 5 * Copyright IBM Corporation, 2017
19 * 6 *
20 * Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com> 7 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
21 */ 8 */
22 9
23#include <linux/rcu_segcblist.h> 10#include <linux/rcu_segcblist.h>
diff --git a/kernel/rcu/rcuperf.c b/kernel/rcu/rcuperf.c
index b459da70b4fc..c29761152874 100644
--- a/kernel/rcu/rcuperf.c
+++ b/kernel/rcu/rcuperf.c
@@ -1,23 +1,10 @@
1// SPDX-License-Identifier: GPL-2.0+
1/* 2/*
2 * Read-Copy Update module-based performance-test facility 3 * Read-Copy Update module-based performance-test facility
3 * 4 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, you can access it online at
16 * http://www.gnu.org/licenses/gpl-2.0.html.
17 *
18 * Copyright (C) IBM Corporation, 2015 5 * Copyright (C) IBM Corporation, 2015
19 * 6 *
20 * Authors: Paul E. McKenney <paulmck@us.ibm.com> 7 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
21 */ 8 */
22 9
23#define pr_fmt(fmt) fmt 10#define pr_fmt(fmt) fmt
@@ -54,7 +41,7 @@
54#include "rcu.h" 41#include "rcu.h"
55 42
56MODULE_LICENSE("GPL"); 43MODULE_LICENSE("GPL");
57MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.vnet.ibm.com>"); 44MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");
58 45
59#define PERF_FLAG "-perf:" 46#define PERF_FLAG "-perf:"
60#define PERFOUT_STRING(s) \ 47#define PERFOUT_STRING(s) \
@@ -83,13 +70,19 @@ MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.vnet.ibm.com>");
83 * Various other use cases may of course be specified. 70 * Various other use cases may of course be specified.
84 */ 71 */
85 72
73#ifdef MODULE
74# define RCUPERF_SHUTDOWN 0
75#else
76# define RCUPERF_SHUTDOWN 1
77#endif
78
86torture_param(bool, gp_async, false, "Use asynchronous GP wait primitives"); 79torture_param(bool, gp_async, false, "Use asynchronous GP wait primitives");
87torture_param(int, gp_async_max, 1000, "Max # outstanding waits per reader"); 80torture_param(int, gp_async_max, 1000, "Max # outstanding waits per reader");
88torture_param(bool, gp_exp, false, "Use expedited GP wait primitives"); 81torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
89torture_param(int, holdoff, 10, "Holdoff time before test start (s)"); 82torture_param(int, holdoff, 10, "Holdoff time before test start (s)");
90torture_param(int, nreaders, -1, "Number of RCU reader threads"); 83torture_param(int, nreaders, -1, "Number of RCU reader threads");
91torture_param(int, nwriters, -1, "Number of RCU updater threads"); 84torture_param(int, nwriters, -1, "Number of RCU updater threads");
92torture_param(bool, shutdown, !IS_ENABLED(MODULE), 85torture_param(bool, shutdown, RCUPERF_SHUTDOWN,
93 "Shutdown at end of performance tests."); 86 "Shutdown at end of performance tests.");
94torture_param(int, verbose, 1, "Enable verbose debugging printk()s"); 87torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
95torture_param(int, writer_holdoff, 0, "Holdoff (us) between GPs, zero to disable"); 88torture_param(int, writer_holdoff, 0, "Holdoff (us) between GPs, zero to disable");
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index f6e85faa4ff4..f14d1b18a74f 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -1,23 +1,10 @@
1// SPDX-License-Identifier: GPL-2.0+
1/* 2/*
2 * Read-Copy Update module-based torture test facility 3 * Read-Copy Update module-based torture test facility
3 * 4 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, you can access it online at
16 * http://www.gnu.org/licenses/gpl-2.0.html.
17 *
18 * Copyright (C) IBM Corporation, 2005, 2006 5 * Copyright (C) IBM Corporation, 2005, 2006
19 * 6 *
20 * Authors: Paul E. McKenney <paulmck@us.ibm.com> 7 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
21 * Josh Triplett <josh@joshtriplett.org> 8 * Josh Triplett <josh@joshtriplett.org>
22 * 9 *
23 * See also: Documentation/RCU/torture.txt 10 * See also: Documentation/RCU/torture.txt
@@ -61,7 +48,7 @@
61#include "rcu.h" 48#include "rcu.h"
62 49
63MODULE_LICENSE("GPL"); 50MODULE_LICENSE("GPL");
64MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@joshtriplett.org>"); 51MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>");
65 52
66 53
67/* Bits for ->extendables field, extendables param, and related definitions. */ 54/* Bits for ->extendables field, extendables param, and related definitions. */
@@ -1630,21 +1617,34 @@ static bool rcu_fwd_emergency_stop;
1630#define MIN_FWD_CB_LAUNDERS 3 /* This many CB invocations to count. */ 1617#define MIN_FWD_CB_LAUNDERS 3 /* This many CB invocations to count. */
1631#define MIN_FWD_CBS_LAUNDERED 100 /* Number of counted CBs. */ 1618#define MIN_FWD_CBS_LAUNDERED 100 /* Number of counted CBs. */
1632#define FWD_CBS_HIST_DIV 10 /* Histogram buckets/second. */ 1619#define FWD_CBS_HIST_DIV 10 /* Histogram buckets/second. */
1633static long n_launders_hist[2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV)]; 1620struct rcu_launder_hist {
1621 long n_launders;
1622 unsigned long launder_gp_seq;
1623};
1624#define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV))
1625static struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST];
1626static unsigned long rcu_launder_gp_seq_start;
1634 1627
1635static void rcu_torture_fwd_cb_hist(void) 1628static void rcu_torture_fwd_cb_hist(void)
1636{ 1629{
1630 unsigned long gps;
1631 unsigned long gps_old;
1637 int i; 1632 int i;
1638 int j; 1633 int j;
1639 1634
1640 for (i = ARRAY_SIZE(n_launders_hist) - 1; i > 0; i--) 1635 for (i = ARRAY_SIZE(n_launders_hist) - 1; i > 0; i--)
1641 if (n_launders_hist[i] > 0) 1636 if (n_launders_hist[i].n_launders > 0)
1642 break; 1637 break;
1643 pr_alert("%s: Callback-invocation histogram (duration %lu jiffies):", 1638 pr_alert("%s: Callback-invocation histogram (duration %lu jiffies):",
1644 __func__, jiffies - rcu_fwd_startat); 1639 __func__, jiffies - rcu_fwd_startat);
1645 for (j = 0; j <= i; j++) 1640 gps_old = rcu_launder_gp_seq_start;
1646 pr_cont(" %ds/%d: %ld", 1641 for (j = 0; j <= i; j++) {
1647 j + 1, FWD_CBS_HIST_DIV, n_launders_hist[j]); 1642 gps = n_launders_hist[j].launder_gp_seq;
1643 pr_cont(" %ds/%d: %ld:%ld",
1644 j + 1, FWD_CBS_HIST_DIV, n_launders_hist[j].n_launders,
1645 rcutorture_seq_diff(gps, gps_old));
1646 gps_old = gps;
1647 }
1648 pr_cont("\n"); 1648 pr_cont("\n");
1649} 1649}
1650 1650
@@ -1666,7 +1666,8 @@ static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp)
1666 i = ((jiffies - rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV)); 1666 i = ((jiffies - rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV));
1667 if (i >= ARRAY_SIZE(n_launders_hist)) 1667 if (i >= ARRAY_SIZE(n_launders_hist))
1668 i = ARRAY_SIZE(n_launders_hist) - 1; 1668 i = ARRAY_SIZE(n_launders_hist) - 1;
1669 n_launders_hist[i]++; 1669 n_launders_hist[i].n_launders++;
1670 n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq();
1670 spin_unlock_irqrestore(&rcu_fwd_lock, flags); 1671 spin_unlock_irqrestore(&rcu_fwd_lock, flags);
1671} 1672}
1672 1673
@@ -1786,9 +1787,10 @@ static void rcu_torture_fwd_prog_cr(void)
1786 n_max_cbs = 0; 1787 n_max_cbs = 0;
1787 n_max_gps = 0; 1788 n_max_gps = 0;
1788 for (i = 0; i < ARRAY_SIZE(n_launders_hist); i++) 1789 for (i = 0; i < ARRAY_SIZE(n_launders_hist); i++)
1789 n_launders_hist[i] = 0; 1790 n_launders_hist[i].n_launders = 0;
1790 cver = READ_ONCE(rcu_torture_current_version); 1791 cver = READ_ONCE(rcu_torture_current_version);
1791 gps = cur_ops->get_gp_seq(); 1792 gps = cur_ops->get_gp_seq();
1793 rcu_launder_gp_seq_start = gps;
1792 while (time_before(jiffies, stopat) && 1794 while (time_before(jiffies, stopat) &&
1793 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 1795 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
1794 rfcp = READ_ONCE(rcu_fwd_cb_head); 1796 rfcp = READ_ONCE(rcu_fwd_cb_head);
@@ -2228,6 +2230,14 @@ static void rcu_test_debug_objects(void)
2228#endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */ 2230#endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
2229} 2231}
2230 2232
2233static void rcutorture_sync(void)
2234{
2235 static unsigned long n;
2236
2237 if (cur_ops->sync && !(++n & 0xfff))
2238 cur_ops->sync();
2239}
2240
2231static int __init 2241static int __init
2232rcu_torture_init(void) 2242rcu_torture_init(void)
2233{ 2243{
@@ -2389,7 +2399,8 @@ rcu_torture_init(void)
2389 firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup); 2399 firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
2390 if (firsterr) 2400 if (firsterr)
2391 goto unwind; 2401 goto unwind;
2392 firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval); 2402 firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval,
2403 rcutorture_sync);
2393 if (firsterr) 2404 if (firsterr)
2394 goto unwind; 2405 goto unwind;
2395 firsterr = rcu_torture_stall_init(); 2406 firsterr = rcu_torture_stall_init();
diff --git a/kernel/rcu/srcutiny.c b/kernel/rcu/srcutiny.c
index 32dfd6522548..5d4a39a6505a 100644
--- a/kernel/rcu/srcutiny.c
+++ b/kernel/rcu/srcutiny.c
@@ -1,24 +1,11 @@
1// SPDX-License-Identifier: GPL-2.0+
1/* 2/*
2 * Sleepable Read-Copy Update mechanism for mutual exclusion, 3 * Sleepable Read-Copy Update mechanism for mutual exclusion,
3 * tiny version for non-preemptible single-CPU use. 4 * tiny version for non-preemptible single-CPU use.
4 * 5 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, you can access it online at
17 * http://www.gnu.org/licenses/gpl-2.0.html.
18 *
19 * Copyright (C) IBM Corporation, 2017 6 * Copyright (C) IBM Corporation, 2017
20 * 7 *
21 * Author: Paul McKenney <paulmck@us.ibm.com> 8 * Author: Paul McKenney <paulmck@linux.ibm.com>
22 */ 9 */
23 10
24#include <linux/export.h> 11#include <linux/export.h>
diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
index 3600d88d8956..a60b8ba9e1ac 100644
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
@@ -1,24 +1,11 @@
1// SPDX-License-Identifier: GPL-2.0+
1/* 2/*
2 * Sleepable Read-Copy Update mechanism for mutual exclusion. 3 * Sleepable Read-Copy Update mechanism for mutual exclusion.
3 * 4 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, you can access it online at
16 * http://www.gnu.org/licenses/gpl-2.0.html.
17 *
18 * Copyright (C) IBM Corporation, 2006 5 * Copyright (C) IBM Corporation, 2006
19 * Copyright (C) Fujitsu, 2012 6 * Copyright (C) Fujitsu, 2012
20 * 7 *
21 * Author: Paul McKenney <paulmck@us.ibm.com> 8 * Author: Paul McKenney <paulmck@linux.ibm.com>
22 * Lai Jiangshan <laijs@cn.fujitsu.com> 9 * Lai Jiangshan <laijs@cn.fujitsu.com>
23 * 10 *
24 * For detailed explanation of Read-Copy Update mechanism see - 11 * For detailed explanation of Read-Copy Update mechanism see -
@@ -58,6 +45,7 @@ static bool __read_mostly srcu_init_done;
58static void srcu_invoke_callbacks(struct work_struct *work); 45static void srcu_invoke_callbacks(struct work_struct *work);
59static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay); 46static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay);
60static void process_srcu(struct work_struct *work); 47static void process_srcu(struct work_struct *work);
48static void srcu_delay_timer(struct timer_list *t);
61 49
62/* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */ 50/* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */
63#define spin_lock_rcu_node(p) \ 51#define spin_lock_rcu_node(p) \
@@ -156,7 +144,8 @@ static void init_srcu_struct_nodes(struct srcu_struct *ssp, bool is_static)
156 snp->grphi = cpu; 144 snp->grphi = cpu;
157 } 145 }
158 sdp->cpu = cpu; 146 sdp->cpu = cpu;
159 INIT_DELAYED_WORK(&sdp->work, srcu_invoke_callbacks); 147 INIT_WORK(&sdp->work, srcu_invoke_callbacks);
148 timer_setup(&sdp->delay_work, srcu_delay_timer, 0);
160 sdp->ssp = ssp; 149 sdp->ssp = ssp;
161 sdp->grpmask = 1 << (cpu - sdp->mynode->grplo); 150 sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
162 if (is_static) 151 if (is_static)
@@ -386,13 +375,19 @@ void _cleanup_srcu_struct(struct srcu_struct *ssp, bool quiesced)
386 } else { 375 } else {
387 flush_delayed_work(&ssp->work); 376 flush_delayed_work(&ssp->work);
388 } 377 }
389 for_each_possible_cpu(cpu) 378 for_each_possible_cpu(cpu) {
379 struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);
380
390 if (quiesced) { 381 if (quiesced) {
391 if (WARN_ON(delayed_work_pending(&per_cpu_ptr(ssp->sda, cpu)->work))) 382 if (WARN_ON(timer_pending(&sdp->delay_work)))
383 return; /* Just leak it! */
384 if (WARN_ON(work_pending(&sdp->work)))
392 return; /* Just leak it! */ 385 return; /* Just leak it! */
393 } else { 386 } else {
394 flush_delayed_work(&per_cpu_ptr(ssp->sda, cpu)->work); 387 del_timer_sync(&sdp->delay_work);
388 flush_work(&sdp->work);
395 } 389 }
390 }
396 if (WARN_ON(rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) != SRCU_STATE_IDLE) || 391 if (WARN_ON(rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
397 WARN_ON(srcu_readers_active(ssp))) { 392 WARN_ON(srcu_readers_active(ssp))) {
398 pr_info("%s: Active srcu_struct %p state: %d\n", 393 pr_info("%s: Active srcu_struct %p state: %d\n",
@@ -463,39 +458,23 @@ static void srcu_gp_start(struct srcu_struct *ssp)
463 WARN_ON_ONCE(state != SRCU_STATE_SCAN1); 458 WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
464} 459}
465 460
466/*
467 * Track online CPUs to guide callback workqueue placement.
468 */
469DEFINE_PER_CPU(bool, srcu_online);
470 461
471void srcu_online_cpu(unsigned int cpu) 462static void srcu_delay_timer(struct timer_list *t)
472{ 463{
473 WRITE_ONCE(per_cpu(srcu_online, cpu), true); 464 struct srcu_data *sdp = container_of(t, struct srcu_data, delay_work);
474}
475 465
476void srcu_offline_cpu(unsigned int cpu) 466 queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
477{
478 WRITE_ONCE(per_cpu(srcu_online, cpu), false);
479} 467}
480 468
481/* 469static void srcu_queue_delayed_work_on(struct srcu_data *sdp,
482 * Place the workqueue handler on the specified CPU if online, otherwise
483 * just run it whereever. This is useful for placing workqueue handlers
484 * that are to invoke the specified CPU's callbacks.
485 */
486static bool srcu_queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
487 struct delayed_work *dwork,
488 unsigned long delay) 470 unsigned long delay)
489{ 471{
490 bool ret; 472 if (!delay) {
473 queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
474 return;
475 }
491 476
492 preempt_disable(); 477 timer_reduce(&sdp->delay_work, jiffies + delay);
493 if (READ_ONCE(per_cpu(srcu_online, cpu)))
494 ret = queue_delayed_work_on(cpu, wq, dwork, delay);
495 else
496 ret = queue_delayed_work(wq, dwork, delay);
497 preempt_enable();
498 return ret;
499} 478}
500 479
501/* 480/*
@@ -504,7 +483,7 @@ static bool srcu_queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
504 */ 483 */
505static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay) 484static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
506{ 485{
507 srcu_queue_delayed_work_on(sdp->cpu, rcu_gp_wq, &sdp->work, delay); 486 srcu_queue_delayed_work_on(sdp, delay);
508} 487}
509 488
510/* 489/*
@@ -1186,7 +1165,8 @@ static void srcu_invoke_callbacks(struct work_struct *work)
1186 struct srcu_data *sdp; 1165 struct srcu_data *sdp;
1187 struct srcu_struct *ssp; 1166 struct srcu_struct *ssp;
1188 1167
1189 sdp = container_of(work, struct srcu_data, work.work); 1168 sdp = container_of(work, struct srcu_data, work);
1169
1190 ssp = sdp->ssp; 1170 ssp = sdp->ssp;
1191 rcu_cblist_init(&ready_cbs); 1171 rcu_cblist_init(&ready_cbs);
1192 spin_lock_irq_rcu_node(sdp); 1172 spin_lock_irq_rcu_node(sdp);
diff --git a/kernel/rcu/sync.c b/kernel/rcu/sync.c
index be10036fa621..a8304d90573f 100644
--- a/kernel/rcu/sync.c
+++ b/kernel/rcu/sync.c
@@ -1,20 +1,7 @@
1// SPDX-License-Identifier: GPL-2.0+
1/* 2/*
2 * RCU-based infrastructure for lightweight reader-writer locking 3 * RCU-based infrastructure for lightweight reader-writer locking
3 * 4 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, you can access it online at
16 * http://www.gnu.org/licenses/gpl-2.0.html.
17 *
18 * Copyright (c) 2015, Red Hat, Inc. 5 * Copyright (c) 2015, Red Hat, Inc.
19 * 6 *
20 * Author: Oleg Nesterov <oleg@redhat.com> 7 * Author: Oleg Nesterov <oleg@redhat.com>
diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
index 5f5963ba313e..911bd9076d43 100644
--- a/kernel/rcu/tiny.c
+++ b/kernel/rcu/tiny.c
@@ -1,23 +1,10 @@
1// SPDX-License-Identifier: GPL-2.0+
1/* 2/*
2 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition. 3 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
3 * 4 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, you can access it online at
16 * http://www.gnu.org/licenses/gpl-2.0.html.
17 *
18 * Copyright IBM Corporation, 2008 5 * Copyright IBM Corporation, 2008
19 * 6 *
20 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com> 7 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
21 * 8 *
22 * For detailed explanation of Read-Copy Update mechanism see - 9 * For detailed explanation of Read-Copy Update mechanism see -
23 * Documentation/RCU 10 * Documentation/RCU
@@ -76,7 +63,7 @@ void rcu_qs(void)
76 * be called from hardirq context. It is normally called from the 63 * be called from hardirq context. It is normally called from the
77 * scheduling-clock interrupt. 64 * scheduling-clock interrupt.
78 */ 65 */
79void rcu_check_callbacks(int user) 66void rcu_sched_clock_irq(int user)
80{ 67{
81 if (user) { 68 if (user) {
82 rcu_qs(); 69 rcu_qs();
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 9180158756d2..3b084dbfb4bc 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1,27 +1,14 @@
1// SPDX-License-Identifier: GPL-2.0+
1/* 2/*
2 * Read-Copy Update mechanism for mutual exclusion 3 * Read-Copy Update mechanism for mutual exclusion
3 * 4 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, you can access it online at
16 * http://www.gnu.org/licenses/gpl-2.0.html.
17 *
18 * Copyright IBM Corporation, 2008 5 * Copyright IBM Corporation, 2008
19 * 6 *
20 * Authors: Dipankar Sarma <dipankar@in.ibm.com> 7 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
21 * Manfred Spraul <manfred@colorfullife.com> 8 * Manfred Spraul <manfred@colorfullife.com>
22 * Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical version 9 * Paul E. McKenney <paulmck@linux.ibm.com> Hierarchical version
23 * 10 *
24 * Based on the original work by Paul McKenney <paulmck@us.ibm.com> 11 * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
25 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. 12 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
26 * 13 *
27 * For detailed explanation of Read-Copy Update mechanism see - 14 * For detailed explanation of Read-Copy Update mechanism see -
@@ -62,6 +49,7 @@
62#include <linux/suspend.h> 49#include <linux/suspend.h>
63#include <linux/ftrace.h> 50#include <linux/ftrace.h>
64#include <linux/tick.h> 51#include <linux/tick.h>
52#include <linux/sysrq.h>
65 53
66#include "tree.h" 54#include "tree.h"
67#include "rcu.h" 55#include "rcu.h"
@@ -115,6 +103,9 @@ int num_rcu_lvl[] = NUM_RCU_LVL_INIT;
115int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */ 103int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
116/* panic() on RCU Stall sysctl. */ 104/* panic() on RCU Stall sysctl. */
117int sysctl_panic_on_rcu_stall __read_mostly; 105int sysctl_panic_on_rcu_stall __read_mostly;
106/* Commandeer a sysrq key to dump RCU's tree. */
107static bool sysrq_rcu;
108module_param(sysrq_rcu, bool, 0444);
118 109
119/* 110/*
120 * The rcu_scheduler_active variable is initialized to the value 111 * The rcu_scheduler_active variable is initialized to the value
@@ -479,7 +470,6 @@ module_param_cb(jiffies_till_next_fqs, &next_fqs_jiffies_ops, &jiffies_till_next
479module_param(rcu_kick_kthreads, bool, 0644); 470module_param(rcu_kick_kthreads, bool, 0644);
480 471
481static void force_qs_rnp(int (*f)(struct rcu_data *rdp)); 472static void force_qs_rnp(int (*f)(struct rcu_data *rdp));
482static void force_quiescent_state(void);
483static int rcu_pending(void); 473static int rcu_pending(void);
484 474
485/* 475/*
@@ -504,13 +494,12 @@ unsigned long rcu_exp_batches_completed(void)
504EXPORT_SYMBOL_GPL(rcu_exp_batches_completed); 494EXPORT_SYMBOL_GPL(rcu_exp_batches_completed);
505 495
506/* 496/*
507 * Force a quiescent state. 497 * Return the root node of the rcu_state structure.
508 */ 498 */
509void rcu_force_quiescent_state(void) 499static struct rcu_node *rcu_get_root(void)
510{ 500{
511 force_quiescent_state(); 501 return &rcu_state.node[0];
512} 502}
513EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
514 503
515/* 504/*
516 * Convert a ->gp_state value to a character string. 505 * Convert a ->gp_state value to a character string.
@@ -529,19 +518,30 @@ void show_rcu_gp_kthreads(void)
529{ 518{
530 int cpu; 519 int cpu;
531 unsigned long j; 520 unsigned long j;
521 unsigned long ja;
522 unsigned long jr;
523 unsigned long jw;
532 struct rcu_data *rdp; 524 struct rcu_data *rdp;
533 struct rcu_node *rnp; 525 struct rcu_node *rnp;
534 526
535 j = jiffies - READ_ONCE(rcu_state.gp_activity); 527 j = jiffies;
536 pr_info("%s: wait state: %s(%d) ->state: %#lx delta ->gp_activity %ld\n", 528 ja = j - READ_ONCE(rcu_state.gp_activity);
529 jr = j - READ_ONCE(rcu_state.gp_req_activity);
530 jw = j - READ_ONCE(rcu_state.gp_wake_time);
531 pr_info("%s: wait state: %s(%d) ->state: %#lx delta ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_flags %#x\n",
537 rcu_state.name, gp_state_getname(rcu_state.gp_state), 532 rcu_state.name, gp_state_getname(rcu_state.gp_state),
538 rcu_state.gp_state, rcu_state.gp_kthread->state, j); 533 rcu_state.gp_state,
534 rcu_state.gp_kthread ? rcu_state.gp_kthread->state : 0x1ffffL,
535 ja, jr, jw, (long)READ_ONCE(rcu_state.gp_wake_seq),
536 (long)READ_ONCE(rcu_state.gp_seq),
537 (long)READ_ONCE(rcu_get_root()->gp_seq_needed),
538 READ_ONCE(rcu_state.gp_flags));
539 rcu_for_each_node_breadth_first(rnp) { 539 rcu_for_each_node_breadth_first(rnp) {
540 if (ULONG_CMP_GE(rcu_state.gp_seq, rnp->gp_seq_needed)) 540 if (ULONG_CMP_GE(rcu_state.gp_seq, rnp->gp_seq_needed))
541 continue; 541 continue;
542 pr_info("\trcu_node %d:%d ->gp_seq %lu ->gp_seq_needed %lu\n", 542 pr_info("\trcu_node %d:%d ->gp_seq %ld ->gp_seq_needed %ld\n",
543 rnp->grplo, rnp->grphi, rnp->gp_seq, 543 rnp->grplo, rnp->grphi, (long)rnp->gp_seq,
544 rnp->gp_seq_needed); 544 (long)rnp->gp_seq_needed);
545 if (!rcu_is_leaf_node(rnp)) 545 if (!rcu_is_leaf_node(rnp))
546 continue; 546 continue;
547 for_each_leaf_node_possible_cpu(rnp, cpu) { 547 for_each_leaf_node_possible_cpu(rnp, cpu) {
@@ -550,14 +550,35 @@ void show_rcu_gp_kthreads(void)
550 ULONG_CMP_GE(rcu_state.gp_seq, 550 ULONG_CMP_GE(rcu_state.gp_seq,
551 rdp->gp_seq_needed)) 551 rdp->gp_seq_needed))
552 continue; 552 continue;
553 pr_info("\tcpu %d ->gp_seq_needed %lu\n", 553 pr_info("\tcpu %d ->gp_seq_needed %ld\n",
554 cpu, rdp->gp_seq_needed); 554 cpu, (long)rdp->gp_seq_needed);
555 } 555 }
556 } 556 }
557 /* sched_show_task(rcu_state.gp_kthread); */ 557 /* sched_show_task(rcu_state.gp_kthread); */
558} 558}
559EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads); 559EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);
560 560
561/* Dump grace-period-request information due to commandeered sysrq. */
562static void sysrq_show_rcu(int key)
563{
564 show_rcu_gp_kthreads();
565}
566
567static struct sysrq_key_op sysrq_rcudump_op = {
568 .handler = sysrq_show_rcu,
569 .help_msg = "show-rcu(y)",
570 .action_msg = "Show RCU tree",
571 .enable_mask = SYSRQ_ENABLE_DUMP,
572};
573
574static int __init rcu_sysrq_init(void)
575{
576 if (sysrq_rcu)
577 return register_sysrq_key('y', &sysrq_rcudump_op);
578 return 0;
579}
580early_initcall(rcu_sysrq_init);
581
561/* 582/*
562 * Send along grace-period-related data for rcutorture diagnostics. 583 * Send along grace-period-related data for rcutorture diagnostics.
563 */ 584 */
@@ -566,8 +587,6 @@ void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
566{ 587{
567 switch (test_type) { 588 switch (test_type) {
568 case RCU_FLAVOR: 589 case RCU_FLAVOR:
569 case RCU_BH_FLAVOR:
570 case RCU_SCHED_FLAVOR:
571 *flags = READ_ONCE(rcu_state.gp_flags); 590 *flags = READ_ONCE(rcu_state.gp_flags);
572 *gp_seq = rcu_seq_current(&rcu_state.gp_seq); 591 *gp_seq = rcu_seq_current(&rcu_state.gp_seq);
573 break; 592 break;
@@ -578,14 +597,6 @@ void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
578EXPORT_SYMBOL_GPL(rcutorture_get_gp_data); 597EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);
579 598
580/* 599/*
581 * Return the root node of the rcu_state structure.
582 */
583static struct rcu_node *rcu_get_root(void)
584{
585 return &rcu_state.node[0];
586}
587
588/*
589 * Enter an RCU extended quiescent state, which can be either the 600 * Enter an RCU extended quiescent state, which can be either the
590 * idle loop or adaptive-tickless usermode execution. 601 * idle loop or adaptive-tickless usermode execution.
591 * 602 *
@@ -701,7 +712,6 @@ static __always_inline void rcu_nmi_exit_common(bool irq)
701 712
702/** 713/**
703 * rcu_nmi_exit - inform RCU of exit from NMI context 714 * rcu_nmi_exit - inform RCU of exit from NMI context
704 * @irq: Is this call from rcu_irq_exit?
705 * 715 *
706 * If you add or remove a call to rcu_nmi_exit(), be sure to test 716 * If you add or remove a call to rcu_nmi_exit(), be sure to test
707 * with CONFIG_RCU_EQS_DEBUG=y. 717 * with CONFIG_RCU_EQS_DEBUG=y.
@@ -1115,7 +1125,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
1115 } 1125 }
1116 1126
1117 /* 1127 /*
1118 * NO_HZ_FULL CPUs can run in-kernel without rcu_check_callbacks! 1128 * NO_HZ_FULL CPUs can run in-kernel without rcu_sched_clock_irq!
1119 * The above code handles this, but only for straight cond_resched(). 1129 * The above code handles this, but only for straight cond_resched().
1120 * And some in-kernel loops check need_resched() before calling 1130 * And some in-kernel loops check need_resched() before calling
1121 * cond_resched(), which defeats the above code for CPUs that are 1131 * cond_resched(), which defeats the above code for CPUs that are
@@ -1181,7 +1191,7 @@ static void rcu_check_gp_kthread_starvation(void)
1181 pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#lx ->cpu=%d\n", 1191 pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#lx ->cpu=%d\n",
1182 rcu_state.name, j, 1192 rcu_state.name, j,
1183 (long)rcu_seq_current(&rcu_state.gp_seq), 1193 (long)rcu_seq_current(&rcu_state.gp_seq),
1184 rcu_state.gp_flags, 1194 READ_ONCE(rcu_state.gp_flags),
1185 gp_state_getname(rcu_state.gp_state), rcu_state.gp_state, 1195 gp_state_getname(rcu_state.gp_state), rcu_state.gp_state,
1186 gpk ? gpk->state : ~0, gpk ? task_cpu(gpk) : -1); 1196 gpk ? gpk->state : ~0, gpk ? task_cpu(gpk) : -1);
1187 if (gpk) { 1197 if (gpk) {
@@ -1310,7 +1320,7 @@ static void print_other_cpu_stall(unsigned long gp_seq)
1310 1320
1311 panic_on_rcu_stall(); 1321 panic_on_rcu_stall();
1312 1322
1313 force_quiescent_state(); /* Kick them all. */ 1323 rcu_force_quiescent_state(); /* Kick them all. */
1314} 1324}
1315 1325
1316static void print_cpu_stall(void) 1326static void print_cpu_stall(void)
@@ -1557,17 +1567,28 @@ static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
1557} 1567}
1558 1568
1559/* 1569/*
1560 * Awaken the grace-period kthread. Don't do a self-awaken, and don't 1570 * Awaken the grace-period kthread. Don't do a self-awaken (unless in
1561 * bother awakening when there is nothing for the grace-period kthread 1571 * an interrupt or softirq handler), and don't bother awakening when there
1562 * to do (as in several CPUs raced to awaken, and we lost), and finally 1572 * is nothing for the grace-period kthread to do (as in several CPUs raced
1563 * don't try to awaken a kthread that has not yet been created. 1573 * to awaken, and we lost), and finally don't try to awaken a kthread that
1574 * has not yet been created. If all those checks are passed, track some
1575 * debug information and awaken.
1576 *
1577 * So why do the self-wakeup when in an interrupt or softirq handler
1578 * in the grace-period kthread's context? Because the kthread might have
1579 * been interrupted just as it was going to sleep, and just after the final
1580 * pre-sleep check of the awaken condition. In this case, a wakeup really
1581 * is required, and is therefore supplied.
1564 */ 1582 */
1565static void rcu_gp_kthread_wake(void) 1583static void rcu_gp_kthread_wake(void)
1566{ 1584{
1567 if (current == rcu_state.gp_kthread || 1585 if ((current == rcu_state.gp_kthread &&
1586 !in_interrupt() && !in_serving_softirq()) ||
1568 !READ_ONCE(rcu_state.gp_flags) || 1587 !READ_ONCE(rcu_state.gp_flags) ||
1569 !rcu_state.gp_kthread) 1588 !rcu_state.gp_kthread)
1570 return; 1589 return;
1590 WRITE_ONCE(rcu_state.gp_wake_time, jiffies);
1591 WRITE_ONCE(rcu_state.gp_wake_seq, READ_ONCE(rcu_state.gp_seq));
1571 swake_up_one(&rcu_state.gp_wq); 1592 swake_up_one(&rcu_state.gp_wq);
1572} 1593}
1573 1594
@@ -1711,7 +1732,7 @@ static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp)
1711 zero_cpu_stall_ticks(rdp); 1732 zero_cpu_stall_ticks(rdp);
1712 } 1733 }
1713 rdp->gp_seq = rnp->gp_seq; /* Remember new grace-period state. */ 1734 rdp->gp_seq = rnp->gp_seq; /* Remember new grace-period state. */
1714 if (ULONG_CMP_GE(rnp->gp_seq_needed, rdp->gp_seq_needed) || rdp->gpwrap) 1735 if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap)
1715 rdp->gp_seq_needed = rnp->gp_seq_needed; 1736 rdp->gp_seq_needed = rnp->gp_seq_needed;
1716 WRITE_ONCE(rdp->gpwrap, false); 1737 WRITE_ONCE(rdp->gpwrap, false);
1717 rcu_gpnum_ovf(rnp, rdp); 1738 rcu_gpnum_ovf(rnp, rdp);
@@ -1939,7 +1960,7 @@ static void rcu_gp_fqs_loop(void)
1939 if (!ret) { 1960 if (!ret) {
1940 rcu_state.jiffies_force_qs = jiffies + j; 1961 rcu_state.jiffies_force_qs = jiffies + j;
1941 WRITE_ONCE(rcu_state.jiffies_kick_kthreads, 1962 WRITE_ONCE(rcu_state.jiffies_kick_kthreads,
1942 jiffies + 3 * j); 1963 jiffies + (j ? 3 * j : 2));
1943 } 1964 }
1944 trace_rcu_grace_period(rcu_state.name, 1965 trace_rcu_grace_period(rcu_state.name,
1945 READ_ONCE(rcu_state.gp_seq), 1966 READ_ONCE(rcu_state.gp_seq),
@@ -2497,14 +2518,14 @@ static void rcu_do_batch(struct rcu_data *rdp)
2497} 2518}
2498 2519
2499/* 2520/*
2500 * Check to see if this CPU is in a non-context-switch quiescent state 2521 * This function is invoked from each scheduling-clock interrupt,
2501 * (user mode or idle loop for rcu, non-softirq execution for rcu_bh). 2522 * and checks to see if this CPU is in a non-context-switch quiescent
2502 * Also schedule RCU core processing. 2523 * state, for example, user mode or idle loop. It also schedules RCU
2503 * 2524 * core processing. If the current grace period has gone on too long,
2504 * This function must be called from hardirq context. It is normally 2525 * it will ask the scheduler to manufacture a context switch for the sole
2505 * invoked from the scheduling-clock interrupt. 2526 * purpose of providing a providing the needed quiescent state.
2506 */ 2527 */
2507void rcu_check_callbacks(int user) 2528void rcu_sched_clock_irq(int user)
2508{ 2529{
2509 trace_rcu_utilization(TPS("Start scheduler-tick")); 2530 trace_rcu_utilization(TPS("Start scheduler-tick"));
2510 raw_cpu_inc(rcu_data.ticks_this_gp); 2531 raw_cpu_inc(rcu_data.ticks_this_gp);
@@ -2517,7 +2538,7 @@ void rcu_check_callbacks(int user)
2517 } 2538 }
2518 __this_cpu_write(rcu_data.rcu_urgent_qs, false); 2539 __this_cpu_write(rcu_data.rcu_urgent_qs, false);
2519 } 2540 }
2520 rcu_flavor_check_callbacks(user); 2541 rcu_flavor_sched_clock_irq(user);
2521 if (rcu_pending()) 2542 if (rcu_pending())
2522 invoke_rcu_core(); 2543 invoke_rcu_core();
2523 2544
@@ -2578,7 +2599,7 @@ static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
2578 * Force quiescent states on reluctant CPUs, and also detect which 2599 * Force quiescent states on reluctant CPUs, and also detect which
2579 * CPUs are in dyntick-idle mode. 2600 * CPUs are in dyntick-idle mode.
2580 */ 2601 */
2581static void force_quiescent_state(void) 2602void rcu_force_quiescent_state(void)
2582{ 2603{
2583 unsigned long flags; 2604 unsigned long flags;
2584 bool ret; 2605 bool ret;
@@ -2610,6 +2631,7 @@ static void force_quiescent_state(void)
2610 raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags); 2631 raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2611 rcu_gp_kthread_wake(); 2632 rcu_gp_kthread_wake();
2612} 2633}
2634EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
2613 2635
2614/* 2636/*
2615 * This function checks for grace-period requests that fail to motivate 2637 * This function checks for grace-period requests that fail to motivate
@@ -2657,16 +2679,11 @@ rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
2657 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2679 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2658 return; 2680 return;
2659 } 2681 }
2660 pr_alert("%s: g%ld->%ld gar:%lu ga:%lu f%#x gs:%d %s->state:%#lx\n",
2661 __func__, (long)READ_ONCE(rcu_state.gp_seq),
2662 (long)READ_ONCE(rnp_root->gp_seq_needed),
2663 j - rcu_state.gp_req_activity, j - rcu_state.gp_activity,
2664 rcu_state.gp_flags, rcu_state.gp_state, rcu_state.name,
2665 rcu_state.gp_kthread ? rcu_state.gp_kthread->state : 0x1ffffL);
2666 WARN_ON(1); 2682 WARN_ON(1);
2667 if (rnp_root != rnp) 2683 if (rnp_root != rnp)
2668 raw_spin_unlock_rcu_node(rnp_root); 2684 raw_spin_unlock_rcu_node(rnp_root);
2669 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2685 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2686 show_rcu_gp_kthreads();
2670} 2687}
2671 2688
2672/* 2689/*
@@ -2711,12 +2728,8 @@ void rcu_fwd_progress_check(unsigned long j)
2711} 2728}
2712EXPORT_SYMBOL_GPL(rcu_fwd_progress_check); 2729EXPORT_SYMBOL_GPL(rcu_fwd_progress_check);
2713 2730
2714/* 2731/* Perform RCU core processing work for the current CPU. */
2715 * This does the RCU core processing work for the specified rcu_data 2732static __latent_entropy void rcu_core(struct softirq_action *unused)
2716 * structures. This may be called only from the CPU to whom the rdp
2717 * belongs.
2718 */
2719static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
2720{ 2733{
2721 unsigned long flags; 2734 unsigned long flags;
2722 struct rcu_data *rdp = raw_cpu_ptr(&rcu_data); 2735 struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
@@ -2801,9 +2814,9 @@ static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head,
2801 2814
2802 /* 2815 /*
2803 * Force the grace period if too many callbacks or too long waiting. 2816 * Force the grace period if too many callbacks or too long waiting.
2804 * Enforce hysteresis, and don't invoke force_quiescent_state() 2817 * Enforce hysteresis, and don't invoke rcu_force_quiescent_state()
2805 * if some other CPU has recently done so. Also, don't bother 2818 * if some other CPU has recently done so. Also, don't bother
2806 * invoking force_quiescent_state() if the newly enqueued callback 2819 * invoking rcu_force_quiescent_state() if the newly enqueued callback
2807 * is the only one waiting for a grace period to complete. 2820 * is the only one waiting for a grace period to complete.
2808 */ 2821 */
2809 if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) > 2822 if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) >
@@ -2820,7 +2833,7 @@ static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head,
2820 rdp->blimit = LONG_MAX; 2833 rdp->blimit = LONG_MAX;
2821 if (rcu_state.n_force_qs == rdp->n_force_qs_snap && 2834 if (rcu_state.n_force_qs == rdp->n_force_qs_snap &&
2822 rcu_segcblist_first_pend_cb(&rdp->cblist) != head) 2835 rcu_segcblist_first_pend_cb(&rdp->cblist) != head)
2823 force_quiescent_state(); 2836 rcu_force_quiescent_state();
2824 rdp->n_force_qs_snap = rcu_state.n_force_qs; 2837 rdp->n_force_qs_snap = rcu_state.n_force_qs;
2825 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist); 2838 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2826 } 2839 }
@@ -2889,9 +2902,6 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func, int cpu, bool lazy)
2889 rcu_segcblist_init(&rdp->cblist); 2902 rcu_segcblist_init(&rdp->cblist);
2890 } 2903 }
2891 rcu_segcblist_enqueue(&rdp->cblist, head, lazy); 2904 rcu_segcblist_enqueue(&rdp->cblist, head, lazy);
2892 if (!lazy)
2893 rcu_idle_count_callbacks_posted();
2894
2895 if (__is_kfree_rcu_offset((unsigned long)func)) 2905 if (__is_kfree_rcu_offset((unsigned long)func))
2896 trace_rcu_kfree_callback(rcu_state.name, head, 2906 trace_rcu_kfree_callback(rcu_state.name, head,
2897 (unsigned long)func, 2907 (unsigned long)func,
@@ -2961,6 +2971,79 @@ void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
2961} 2971}
2962EXPORT_SYMBOL_GPL(kfree_call_rcu); 2972EXPORT_SYMBOL_GPL(kfree_call_rcu);
2963 2973
2974/*
2975 * During early boot, any blocking grace-period wait automatically
2976 * implies a grace period. Later on, this is never the case for PREEMPT.
2977 *
2978 * Howevr, because a context switch is a grace period for !PREEMPT, any
2979 * blocking grace-period wait automatically implies a grace period if
2980 * there is only one CPU online at any point time during execution of
2981 * either synchronize_rcu() or synchronize_rcu_expedited(). It is OK to
2982 * occasionally incorrectly indicate that there are multiple CPUs online
2983 * when there was in fact only one the whole time, as this just adds some
2984 * overhead: RCU still operates correctly.
2985 */
2986static int rcu_blocking_is_gp(void)
2987{
2988 int ret;
2989
2990 if (IS_ENABLED(CONFIG_PREEMPT))
2991 return rcu_scheduler_active == RCU_SCHEDULER_INACTIVE;
2992 might_sleep(); /* Check for RCU read-side critical section. */
2993 preempt_disable();
2994 ret = num_online_cpus() <= 1;
2995 preempt_enable();
2996 return ret;
2997}
2998
2999/**
3000 * synchronize_rcu - wait until a grace period has elapsed.
3001 *
3002 * Control will return to the caller some time after a full grace
3003 * period has elapsed, in other words after all currently executing RCU
3004 * read-side critical sections have completed. Note, however, that
3005 * upon return from synchronize_rcu(), the caller might well be executing
3006 * concurrently with new RCU read-side critical sections that began while
3007 * synchronize_rcu() was waiting. RCU read-side critical sections are
3008 * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
3009 * In addition, regions of code across which interrupts, preemption, or
3010 * softirqs have been disabled also serve as RCU read-side critical
3011 * sections. This includes hardware interrupt handlers, softirq handlers,
3012 * and NMI handlers.
3013 *
3014 * Note that this guarantee implies further memory-ordering guarantees.
3015 * On systems with more than one CPU, when synchronize_rcu() returns,
3016 * each CPU is guaranteed to have executed a full memory barrier since
3017 * the end of its last RCU read-side critical section whose beginning
3018 * preceded the call to synchronize_rcu(). In addition, each CPU having
3019 * an RCU read-side critical section that extends beyond the return from
3020 * synchronize_rcu() is guaranteed to have executed a full memory barrier
3021 * after the beginning of synchronize_rcu() and before the beginning of
3022 * that RCU read-side critical section. Note that these guarantees include
3023 * CPUs that are offline, idle, or executing in user mode, as well as CPUs
3024 * that are executing in the kernel.
3025 *
3026 * Furthermore, if CPU A invoked synchronize_rcu(), which returned
3027 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
3028 * to have executed a full memory barrier during the execution of
3029 * synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but
3030 * again only if the system has more than one CPU).
3031 */
3032void synchronize_rcu(void)
3033{
3034 RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
3035 lock_is_held(&rcu_lock_map) ||
3036 lock_is_held(&rcu_sched_lock_map),
3037 "Illegal synchronize_rcu() in RCU read-side critical section");
3038 if (rcu_blocking_is_gp())
3039 return;
3040 if (rcu_gp_is_expedited())
3041 synchronize_rcu_expedited();
3042 else
3043 wait_rcu_gp(call_rcu);
3044}
3045EXPORT_SYMBOL_GPL(synchronize_rcu);
3046
2964/** 3047/**
2965 * get_state_synchronize_rcu - Snapshot current RCU state 3048 * get_state_synchronize_rcu - Snapshot current RCU state
2966 * 3049 *
@@ -3049,28 +3132,6 @@ static int rcu_pending(void)
3049} 3132}
3050 3133
3051/* 3134/*
3052 * Return true if the specified CPU has any callback. If all_lazy is
3053 * non-NULL, store an indication of whether all callbacks are lazy.
3054 * (If there are no callbacks, all of them are deemed to be lazy.)
3055 */
3056static bool rcu_cpu_has_callbacks(bool *all_lazy)
3057{
3058 bool al = true;
3059 bool hc = false;
3060 struct rcu_data *rdp;
3061
3062 rdp = this_cpu_ptr(&rcu_data);
3063 if (!rcu_segcblist_empty(&rdp->cblist)) {
3064 hc = true;
3065 if (rcu_segcblist_n_nonlazy_cbs(&rdp->cblist))
3066 al = false;
3067 }
3068 if (all_lazy)
3069 *all_lazy = al;
3070 return hc;
3071}
3072
3073/*
3074 * Helper function for rcu_barrier() tracing. If tracing is disabled, 3135 * Helper function for rcu_barrier() tracing. If tracing is disabled,
3075 * the compiler is expected to optimize this away. 3136 * the compiler is expected to optimize this away.
3076 */ 3137 */
@@ -3299,7 +3360,7 @@ int rcutree_prepare_cpu(unsigned int cpu)
3299 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl")); 3360 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl"));
3300 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 3361 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3301 rcu_prepare_kthreads(cpu); 3362 rcu_prepare_kthreads(cpu);
3302 rcu_spawn_all_nocb_kthreads(cpu); 3363 rcu_spawn_cpu_nocb_kthread(cpu);
3303 3364
3304 return 0; 3365 return 0;
3305} 3366}
@@ -3329,8 +3390,6 @@ int rcutree_online_cpu(unsigned int cpu)
3329 raw_spin_lock_irqsave_rcu_node(rnp, flags); 3390 raw_spin_lock_irqsave_rcu_node(rnp, flags);
3330 rnp->ffmask |= rdp->grpmask; 3391 rnp->ffmask |= rdp->grpmask;
3331 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 3392 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3332 if (IS_ENABLED(CONFIG_TREE_SRCU))
3333 srcu_online_cpu(cpu);
3334 if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE) 3393 if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
3335 return 0; /* Too early in boot for scheduler work. */ 3394 return 0; /* Too early in boot for scheduler work. */
3336 sync_sched_exp_online_cleanup(cpu); 3395 sync_sched_exp_online_cleanup(cpu);
@@ -3355,8 +3414,6 @@ int rcutree_offline_cpu(unsigned int cpu)
3355 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 3414 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3356 3415
3357 rcutree_affinity_setting(cpu, cpu); 3416 rcutree_affinity_setting(cpu, cpu);
3358 if (IS_ENABLED(CONFIG_TREE_SRCU))
3359 srcu_offline_cpu(cpu);
3360 return 0; 3417 return 0;
3361} 3418}
3362 3419
@@ -3777,7 +3834,7 @@ void __init rcu_init(void)
3777 rcu_init_one(); 3834 rcu_init_one();
3778 if (dump_tree) 3835 if (dump_tree)
3779 rcu_dump_rcu_node_tree(); 3836 rcu_dump_rcu_node_tree();
3780 open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); 3837 open_softirq(RCU_SOFTIRQ, rcu_core);
3781 3838
3782 /* 3839 /*
3783 * We don't need protection against CPU-hotplug here because 3840 * We don't need protection against CPU-hotplug here because
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index d90b02b53c0e..bb4f995f2d3f 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -1,25 +1,12 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
1/* 2/*
2 * Read-Copy Update mechanism for mutual exclusion (tree-based version) 3 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
3 * Internal non-public definitions. 4 * Internal non-public definitions.
4 * 5 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, you can access it online at
17 * http://www.gnu.org/licenses/gpl-2.0.html.
18 *
19 * Copyright IBM Corporation, 2008 6 * Copyright IBM Corporation, 2008
20 * 7 *
21 * Author: Ingo Molnar <mingo@elte.hu> 8 * Author: Ingo Molnar <mingo@elte.hu>
22 * Paul E. McKenney <paulmck@linux.vnet.ibm.com> 9 * Paul E. McKenney <paulmck@linux.ibm.com>
23 */ 10 */
24 11
25#include <linux/cache.h> 12#include <linux/cache.h>
@@ -36,7 +23,6 @@
36 23
37/* Communicate arguments to a workqueue handler. */ 24/* Communicate arguments to a workqueue handler. */
38struct rcu_exp_work { 25struct rcu_exp_work {
39 smp_call_func_t rew_func;
40 unsigned long rew_s; 26 unsigned long rew_s;
41 struct work_struct rew_work; 27 struct work_struct rew_work;
42}; 28};
@@ -194,10 +180,7 @@ struct rcu_data {
194 bool rcu_need_heavy_qs; /* GP old, so heavy quiescent state! */ 180 bool rcu_need_heavy_qs; /* GP old, so heavy quiescent state! */
195 bool rcu_urgent_qs; /* GP old need light quiescent state. */ 181 bool rcu_urgent_qs; /* GP old need light quiescent state. */
196#ifdef CONFIG_RCU_FAST_NO_HZ 182#ifdef CONFIG_RCU_FAST_NO_HZ
197 bool all_lazy; /* Are all CPU's CBs lazy? */ 183 bool all_lazy; /* All CPU's CBs lazy at idle start? */
198 unsigned long nonlazy_posted; /* # times non-lazy CB posted to CPU. */
199 unsigned long nonlazy_posted_snap;
200 /* Nonlazy_posted snapshot. */
201 unsigned long last_accelerate; /* Last jiffy CBs were accelerated. */ 184 unsigned long last_accelerate; /* Last jiffy CBs were accelerated. */
202 unsigned long last_advance_all; /* Last jiffy CBs were all advanced. */ 185 unsigned long last_advance_all; /* Last jiffy CBs were all advanced. */
203 int tick_nohz_enabled_snap; /* Previously seen value from sysfs. */ 186 int tick_nohz_enabled_snap; /* Previously seen value from sysfs. */
@@ -234,7 +217,13 @@ struct rcu_data {
234 /* Leader CPU takes GP-end wakeups. */ 217 /* Leader CPU takes GP-end wakeups. */
235#endif /* #ifdef CONFIG_RCU_NOCB_CPU */ 218#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
236 219
237 /* 6) Diagnostic data, including RCU CPU stall warnings. */ 220 /* 6) RCU priority boosting. */
221 struct task_struct *rcu_cpu_kthread_task;
222 /* rcuc per-CPU kthread or NULL. */
223 unsigned int rcu_cpu_kthread_status;
224 char rcu_cpu_has_work;
225
226 /* 7) Diagnostic data, including RCU CPU stall warnings. */
238 unsigned int softirq_snap; /* Snapshot of softirq activity. */ 227 unsigned int softirq_snap; /* Snapshot of softirq activity. */
239 /* ->rcu_iw* fields protected by leaf rcu_node ->lock. */ 228 /* ->rcu_iw* fields protected by leaf rcu_node ->lock. */
240 struct irq_work rcu_iw; /* Check for non-irq activity. */ 229 struct irq_work rcu_iw; /* Check for non-irq activity. */
@@ -303,6 +292,8 @@ struct rcu_state {
303 struct swait_queue_head gp_wq; /* Where GP task waits. */ 292 struct swait_queue_head gp_wq; /* Where GP task waits. */
304 short gp_flags; /* Commands for GP task. */ 293 short gp_flags; /* Commands for GP task. */
305 short gp_state; /* GP kthread sleep state. */ 294 short gp_state; /* GP kthread sleep state. */
295 unsigned long gp_wake_time; /* Last GP kthread wake. */
296 unsigned long gp_wake_seq; /* ->gp_seq at ^^^. */
306 297
307 /* End of fields guarded by root rcu_node's lock. */ 298 /* End of fields guarded by root rcu_node's lock. */
308 299
@@ -402,13 +393,6 @@ static const char *tp_rcu_varname __used __tracepoint_string = rcu_name;
402 393
403int rcu_dynticks_snap(struct rcu_data *rdp); 394int rcu_dynticks_snap(struct rcu_data *rdp);
404 395
405#ifdef CONFIG_RCU_BOOST
406DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
407DECLARE_PER_CPU(int, rcu_cpu_kthread_cpu);
408DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
409DECLARE_PER_CPU(char, rcu_cpu_has_work);
410#endif /* #ifdef CONFIG_RCU_BOOST */
411
412/* Forward declarations for rcutree_plugin.h */ 396/* Forward declarations for rcutree_plugin.h */
413static void rcu_bootup_announce(void); 397static void rcu_bootup_announce(void);
414static void rcu_qs(void); 398static void rcu_qs(void);
@@ -420,7 +404,7 @@ static void rcu_print_detail_task_stall(void);
420static int rcu_print_task_stall(struct rcu_node *rnp); 404static int rcu_print_task_stall(struct rcu_node *rnp);
421static int rcu_print_task_exp_stall(struct rcu_node *rnp); 405static int rcu_print_task_exp_stall(struct rcu_node *rnp);
422static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp); 406static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
423static void rcu_flavor_check_callbacks(int user); 407static void rcu_flavor_sched_clock_irq(int user);
424void call_rcu(struct rcu_head *head, rcu_callback_t func); 408void call_rcu(struct rcu_head *head, rcu_callback_t func);
425static void dump_blkd_tasks(struct rcu_node *rnp, int ncheck); 409static void dump_blkd_tasks(struct rcu_node *rnp, int ncheck);
426static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags); 410static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
@@ -431,7 +415,6 @@ static void __init rcu_spawn_boost_kthreads(void);
431static void rcu_prepare_kthreads(int cpu); 415static void rcu_prepare_kthreads(int cpu);
432static void rcu_cleanup_after_idle(void); 416static void rcu_cleanup_after_idle(void);
433static void rcu_prepare_for_idle(void); 417static void rcu_prepare_for_idle(void);
434static void rcu_idle_count_callbacks_posted(void);
435static bool rcu_preempt_has_tasks(struct rcu_node *rnp); 418static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
436static bool rcu_preempt_need_deferred_qs(struct task_struct *t); 419static bool rcu_preempt_need_deferred_qs(struct task_struct *t);
437static void rcu_preempt_deferred_qs(struct task_struct *t); 420static void rcu_preempt_deferred_qs(struct task_struct *t);
@@ -451,7 +434,7 @@ static bool rcu_nocb_adopt_orphan_cbs(struct rcu_data *my_rdp,
451static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp); 434static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp);
452static void do_nocb_deferred_wakeup(struct rcu_data *rdp); 435static void do_nocb_deferred_wakeup(struct rcu_data *rdp);
453static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp); 436static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
454static void rcu_spawn_all_nocb_kthreads(int cpu); 437static void rcu_spawn_cpu_nocb_kthread(int cpu);
455static void __init rcu_spawn_nocb_kthreads(void); 438static void __init rcu_spawn_nocb_kthreads(void);
456#ifdef CONFIG_RCU_NOCB_CPU 439#ifdef CONFIG_RCU_NOCB_CPU
457static void __init rcu_organize_nocb_kthreads(void); 440static void __init rcu_organize_nocb_kthreads(void);
@@ -462,11 +445,3 @@ static void rcu_bind_gp_kthread(void);
462static bool rcu_nohz_full_cpu(void); 445static bool rcu_nohz_full_cpu(void);
463static void rcu_dynticks_task_enter(void); 446static void rcu_dynticks_task_enter(void);
464static void rcu_dynticks_task_exit(void); 447static void rcu_dynticks_task_exit(void);
465
466#ifdef CONFIG_SRCU
467void srcu_online_cpu(unsigned int cpu);
468void srcu_offline_cpu(unsigned int cpu);
469#else /* #ifdef CONFIG_SRCU */
470void srcu_online_cpu(unsigned int cpu) { }
471void srcu_offline_cpu(unsigned int cpu) { }
472#endif /* #else #ifdef CONFIG_SRCU */
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index 928fe5893a57..4c2a0189e748 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -1,27 +1,16 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
1/* 2/*
2 * RCU expedited grace periods 3 * RCU expedited grace periods
3 * 4 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, you can access it online at
16 * http://www.gnu.org/licenses/gpl-2.0.html.
17 *
18 * Copyright IBM Corporation, 2016 5 * Copyright IBM Corporation, 2016
19 * 6 *
20 * Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com> 7 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
21 */ 8 */
22 9
23#include <linux/lockdep.h> 10#include <linux/lockdep.h>
24 11
12static void rcu_exp_handler(void *unused);
13
25/* 14/*
26 * Record the start of an expedited grace period. 15 * Record the start of an expedited grace period.
27 */ 16 */
@@ -344,7 +333,6 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
344{ 333{
345 int cpu; 334 int cpu;
346 unsigned long flags; 335 unsigned long flags;
347 smp_call_func_t func;
348 unsigned long mask_ofl_test; 336 unsigned long mask_ofl_test;
349 unsigned long mask_ofl_ipi; 337 unsigned long mask_ofl_ipi;
350 int ret; 338 int ret;
@@ -352,7 +340,6 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
352 container_of(wp, struct rcu_exp_work, rew_work); 340 container_of(wp, struct rcu_exp_work, rew_work);
353 struct rcu_node *rnp = container_of(rewp, struct rcu_node, rew); 341 struct rcu_node *rnp = container_of(rewp, struct rcu_node, rew);
354 342
355 func = rewp->rew_func;
356 raw_spin_lock_irqsave_rcu_node(rnp, flags); 343 raw_spin_lock_irqsave_rcu_node(rnp, flags);
357 344
358 /* Each pass checks a CPU for identity, offline, and idle. */ 345 /* Each pass checks a CPU for identity, offline, and idle. */
@@ -396,7 +383,7 @@ retry_ipi:
396 mask_ofl_test |= mask; 383 mask_ofl_test |= mask;
397 continue; 384 continue;
398 } 385 }
399 ret = smp_call_function_single(cpu, func, NULL, 0); 386 ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
400 if (!ret) { 387 if (!ret) {
401 mask_ofl_ipi &= ~mask; 388 mask_ofl_ipi &= ~mask;
402 continue; 389 continue;
@@ -426,7 +413,7 @@ retry_ipi:
426 * Select the nodes that the upcoming expedited grace period needs 413 * Select the nodes that the upcoming expedited grace period needs
427 * to wait for. 414 * to wait for.
428 */ 415 */
429static void sync_rcu_exp_select_cpus(smp_call_func_t func) 416static void sync_rcu_exp_select_cpus(void)
430{ 417{
431 int cpu; 418 int cpu;
432 struct rcu_node *rnp; 419 struct rcu_node *rnp;
@@ -440,7 +427,6 @@ static void sync_rcu_exp_select_cpus(smp_call_func_t func)
440 rnp->exp_need_flush = false; 427 rnp->exp_need_flush = false;
441 if (!READ_ONCE(rnp->expmask)) 428 if (!READ_ONCE(rnp->expmask))
442 continue; /* Avoid early boot non-existent wq. */ 429 continue; /* Avoid early boot non-existent wq. */
443 rnp->rew.rew_func = func;
444 if (!READ_ONCE(rcu_par_gp_wq) || 430 if (!READ_ONCE(rcu_par_gp_wq) ||
445 rcu_scheduler_active != RCU_SCHEDULER_RUNNING || 431 rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
446 rcu_is_last_leaf_node(rnp)) { 432 rcu_is_last_leaf_node(rnp)) {
@@ -449,7 +435,6 @@ static void sync_rcu_exp_select_cpus(smp_call_func_t func)
449 continue; 435 continue;
450 } 436 }
451 INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus); 437 INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
452 preempt_disable();
453 cpu = find_next_bit(&rnp->ffmask, BITS_PER_LONG, -1); 438 cpu = find_next_bit(&rnp->ffmask, BITS_PER_LONG, -1);
454 /* If all offline, queue the work on an unbound CPU. */ 439 /* If all offline, queue the work on an unbound CPU. */
455 if (unlikely(cpu > rnp->grphi - rnp->grplo)) 440 if (unlikely(cpu > rnp->grphi - rnp->grplo))
@@ -457,7 +442,6 @@ static void sync_rcu_exp_select_cpus(smp_call_func_t func)
457 else 442 else
458 cpu += rnp->grplo; 443 cpu += rnp->grplo;
459 queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work); 444 queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
460 preempt_enable();
461 rnp->exp_need_flush = true; 445 rnp->exp_need_flush = true;
462 } 446 }
463 447
@@ -580,10 +564,10 @@ static void rcu_exp_wait_wake(unsigned long s)
580 * Common code to drive an expedited grace period forward, used by 564 * Common code to drive an expedited grace period forward, used by
581 * workqueues and mid-boot-time tasks. 565 * workqueues and mid-boot-time tasks.
582 */ 566 */
583static void rcu_exp_sel_wait_wake(smp_call_func_t func, unsigned long s) 567static void rcu_exp_sel_wait_wake(unsigned long s)
584{ 568{
585 /* Initialize the rcu_node tree in preparation for the wait. */ 569 /* Initialize the rcu_node tree in preparation for the wait. */
586 sync_rcu_exp_select_cpus(func); 570 sync_rcu_exp_select_cpus();
587 571
588 /* Wait and clean up, including waking everyone. */ 572 /* Wait and clean up, including waking everyone. */
589 rcu_exp_wait_wake(s); 573 rcu_exp_wait_wake(s);
@@ -597,52 +581,7 @@ static void wait_rcu_exp_gp(struct work_struct *wp)
597 struct rcu_exp_work *rewp; 581 struct rcu_exp_work *rewp;
598 582
599 rewp = container_of(wp, struct rcu_exp_work, rew_work); 583 rewp = container_of(wp, struct rcu_exp_work, rew_work);
600 rcu_exp_sel_wait_wake(rewp->rew_func, rewp->rew_s); 584 rcu_exp_sel_wait_wake(rewp->rew_s);
601}
602
603/*
604 * Given a smp_call_function() handler, kick off the specified
605 * implementation of expedited grace period.
606 */
607static void _synchronize_rcu_expedited(smp_call_func_t func)
608{
609 struct rcu_data *rdp;
610 struct rcu_exp_work rew;
611 struct rcu_node *rnp;
612 unsigned long s;
613
614 /* If expedited grace periods are prohibited, fall back to normal. */
615 if (rcu_gp_is_normal()) {
616 wait_rcu_gp(call_rcu);
617 return;
618 }
619
620 /* Take a snapshot of the sequence number. */
621 s = rcu_exp_gp_seq_snap();
622 if (exp_funnel_lock(s))
623 return; /* Someone else did our work for us. */
624
625 /* Ensure that load happens before action based on it. */
626 if (unlikely(rcu_scheduler_active == RCU_SCHEDULER_INIT)) {
627 /* Direct call during scheduler init and early_initcalls(). */
628 rcu_exp_sel_wait_wake(func, s);
629 } else {
630 /* Marshall arguments & schedule the expedited grace period. */
631 rew.rew_func = func;
632 rew.rew_s = s;
633 INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
634 queue_work(rcu_gp_wq, &rew.rew_work);
635 }
636
637 /* Wait for expedited grace period to complete. */
638 rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
639 rnp = rcu_get_root();
640 wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
641 sync_exp_work_done(s));
642 smp_mb(); /* Workqueue actions happen before return. */
643
644 /* Let the next expedited grace period start. */
645 mutex_unlock(&rcu_state.exp_mutex);
646} 585}
647 586
648#ifdef CONFIG_PREEMPT_RCU 587#ifdef CONFIG_PREEMPT_RCU
@@ -654,7 +593,7 @@ static void _synchronize_rcu_expedited(smp_call_func_t func)
654 * ->expmask fields in the rcu_node tree. Otherwise, immediately 593 * ->expmask fields in the rcu_node tree. Otherwise, immediately
655 * report the quiescent state. 594 * report the quiescent state.
656 */ 595 */
657static void sync_rcu_exp_handler(void *unused) 596static void rcu_exp_handler(void *unused)
658{ 597{
659 unsigned long flags; 598 unsigned long flags;
660 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 599 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
@@ -697,6 +636,7 @@ static void sync_rcu_exp_handler(void *unused)
697 WRITE_ONCE(t->rcu_read_unlock_special.b.exp_hint, true); 636 WRITE_ONCE(t->rcu_read_unlock_special.b.exp_hint, true);
698 } 637 }
699 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 638 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
639 return;
700 } 640 }
701 641
702 /* 642 /*
@@ -730,43 +670,10 @@ static void sync_sched_exp_online_cleanup(int cpu)
730{ 670{
731} 671}
732 672
733/**
734 * synchronize_rcu_expedited - Brute-force RCU grace period
735 *
736 * Wait for an RCU-preempt grace period, but expedite it. The basic
737 * idea is to IPI all non-idle non-nohz online CPUs. The IPI handler
738 * checks whether the CPU is in an RCU-preempt critical section, and
739 * if so, it sets a flag that causes the outermost rcu_read_unlock()
740 * to report the quiescent state. On the other hand, if the CPU is
741 * not in an RCU read-side critical section, the IPI handler reports
742 * the quiescent state immediately.
743 *
744 * Although this is a greate improvement over previous expedited
745 * implementations, it is still unfriendly to real-time workloads, so is
746 * thus not recommended for any sort of common-case code. In fact, if
747 * you are using synchronize_rcu_expedited() in a loop, please restructure
748 * your code to batch your updates, and then Use a single synchronize_rcu()
749 * instead.
750 *
751 * This has the same semantics as (but is more brutal than) synchronize_rcu().
752 */
753void synchronize_rcu_expedited(void)
754{
755 RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
756 lock_is_held(&rcu_lock_map) ||
757 lock_is_held(&rcu_sched_lock_map),
758 "Illegal synchronize_rcu_expedited() in RCU read-side critical section");
759
760 if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
761 return;
762 _synchronize_rcu_expedited(sync_rcu_exp_handler);
763}
764EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
765
766#else /* #ifdef CONFIG_PREEMPT_RCU */ 673#else /* #ifdef CONFIG_PREEMPT_RCU */
767 674
768/* Invoked on each online non-idle CPU for expedited quiescent state. */ 675/* Invoked on each online non-idle CPU for expedited quiescent state. */
769static void sync_sched_exp_handler(void *unused) 676static void rcu_exp_handler(void *unused)
770{ 677{
771 struct rcu_data *rdp; 678 struct rcu_data *rdp;
772 struct rcu_node *rnp; 679 struct rcu_node *rnp;
@@ -798,44 +705,78 @@ static void sync_sched_exp_online_cleanup(int cpu)
798 rnp = rdp->mynode; 705 rnp = rdp->mynode;
799 if (!(READ_ONCE(rnp->expmask) & rdp->grpmask)) 706 if (!(READ_ONCE(rnp->expmask) & rdp->grpmask))
800 return; 707 return;
801 ret = smp_call_function_single(cpu, sync_sched_exp_handler, NULL, 0); 708 ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
802 WARN_ON_ONCE(ret); 709 WARN_ON_ONCE(ret);
803} 710}
804 711
805/* 712#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
806 * Because a context switch is a grace period for !PREEMPT, any
807 * blocking grace-period wait automatically implies a grace period if
808 * there is only one CPU online at any point time during execution of
809 * either synchronize_rcu() or synchronize_rcu_expedited(). It is OK to
810 * occasionally incorrectly indicate that there are multiple CPUs online
811 * when there was in fact only one the whole time, as this just adds some
812 * overhead: RCU still operates correctly.
813 */
814static int rcu_blocking_is_gp(void)
815{
816 int ret;
817
818 might_sleep(); /* Check for RCU read-side critical section. */
819 preempt_disable();
820 ret = num_online_cpus() <= 1;
821 preempt_enable();
822 return ret;
823}
824 713
825/* PREEMPT=n implementation of synchronize_rcu_expedited(). */ 714/**
715 * synchronize_rcu_expedited - Brute-force RCU grace period
716 *
717 * Wait for an RCU grace period, but expedite it. The basic idea is to
718 * IPI all non-idle non-nohz online CPUs. The IPI handler checks whether
719 * the CPU is in an RCU critical section, and if so, it sets a flag that
720 * causes the outermost rcu_read_unlock() to report the quiescent state
721 * for RCU-preempt or asks the scheduler for help for RCU-sched. On the
722 * other hand, if the CPU is not in an RCU read-side critical section,
723 * the IPI handler reports the quiescent state immediately.
724 *
725 * Although this is a greate improvement over previous expedited
726 * implementations, it is still unfriendly to real-time workloads, so is
727 * thus not recommended for any sort of common-case code. In fact, if
728 * you are using synchronize_rcu_expedited() in a loop, please restructure
729 * your code to batch your updates, and then Use a single synchronize_rcu()
730 * instead.
731 *
732 * This has the same semantics as (but is more brutal than) synchronize_rcu().
733 */
826void synchronize_rcu_expedited(void) 734void synchronize_rcu_expedited(void)
827{ 735{
736 struct rcu_data *rdp;
737 struct rcu_exp_work rew;
738 struct rcu_node *rnp;
739 unsigned long s;
740
828 RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) || 741 RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
829 lock_is_held(&rcu_lock_map) || 742 lock_is_held(&rcu_lock_map) ||
830 lock_is_held(&rcu_sched_lock_map), 743 lock_is_held(&rcu_sched_lock_map),
831 "Illegal synchronize_rcu_expedited() in RCU read-side critical section"); 744 "Illegal synchronize_rcu_expedited() in RCU read-side critical section");
832 745
833 /* If only one CPU, this is automatically a grace period. */ 746 /* Is the state is such that the call is a grace period? */
834 if (rcu_blocking_is_gp()) 747 if (rcu_blocking_is_gp())
835 return; 748 return;
836 749
837 _synchronize_rcu_expedited(sync_sched_exp_handler); 750 /* If expedited grace periods are prohibited, fall back to normal. */
751 if (rcu_gp_is_normal()) {
752 wait_rcu_gp(call_rcu);
753 return;
754 }
755
756 /* Take a snapshot of the sequence number. */
757 s = rcu_exp_gp_seq_snap();
758 if (exp_funnel_lock(s))
759 return; /* Someone else did our work for us. */
760
761 /* Ensure that load happens before action based on it. */
762 if (unlikely(rcu_scheduler_active == RCU_SCHEDULER_INIT)) {
763 /* Direct call during scheduler init and early_initcalls(). */
764 rcu_exp_sel_wait_wake(s);
765 } else {
766 /* Marshall arguments & schedule the expedited grace period. */
767 rew.rew_s = s;
768 INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
769 queue_work(rcu_gp_wq, &rew.rew_work);
770 }
771
772 /* Wait for expedited grace period to complete. */
773 rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
774 rnp = rcu_get_root();
775 wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
776 sync_exp_work_done(s));
777 smp_mb(); /* Workqueue actions happen before return. */
778
779 /* Let the next expedited grace period start. */
780 mutex_unlock(&rcu_state.exp_mutex);
838} 781}
839EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); 782EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
840
841#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 1b3dd2fc0cd6..97dba50f6fb2 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -1,27 +1,14 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
1/* 2/*
2 * Read-Copy Update mechanism for mutual exclusion (tree-based version) 3 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
3 * Internal non-public definitions that provide either classic 4 * Internal non-public definitions that provide either classic
4 * or preemptible semantics. 5 * or preemptible semantics.
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, you can access it online at
18 * http://www.gnu.org/licenses/gpl-2.0.html.
19 *
20 * Copyright Red Hat, 2009 7 * Copyright Red Hat, 2009
21 * Copyright IBM Corporation, 2009 8 * Copyright IBM Corporation, 2009
22 * 9 *
23 * Author: Ingo Molnar <mingo@elte.hu> 10 * Author: Ingo Molnar <mingo@elte.hu>
24 * Paul E. McKenney <paulmck@linux.vnet.ibm.com> 11 * Paul E. McKenney <paulmck@linux.ibm.com>
25 */ 12 */
26 13
27#include <linux/delay.h> 14#include <linux/delay.h>
@@ -34,17 +21,7 @@
34#include "../time/tick-internal.h" 21#include "../time/tick-internal.h"
35 22
36#ifdef CONFIG_RCU_BOOST 23#ifdef CONFIG_RCU_BOOST
37
38#include "../locking/rtmutex_common.h" 24#include "../locking/rtmutex_common.h"
39
40/*
41 * Control variables for per-CPU and per-rcu_node kthreads.
42 */
43static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
44DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
45DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
46DEFINE_PER_CPU(char, rcu_cpu_has_work);
47
48#else /* #ifdef CONFIG_RCU_BOOST */ 25#else /* #ifdef CONFIG_RCU_BOOST */
49 26
50/* 27/*
@@ -307,7 +284,7 @@ static void rcu_qs(void)
307 __this_cpu_read(rcu_data.gp_seq), 284 __this_cpu_read(rcu_data.gp_seq),
308 TPS("cpuqs")); 285 TPS("cpuqs"));
309 __this_cpu_write(rcu_data.cpu_no_qs.b.norm, false); 286 __this_cpu_write(rcu_data.cpu_no_qs.b.norm, false);
310 barrier(); /* Coordinate with rcu_flavor_check_callbacks(). */ 287 barrier(); /* Coordinate with rcu_flavor_sched_clock_irq(). */
311 current->rcu_read_unlock_special.b.need_qs = false; 288 current->rcu_read_unlock_special.b.need_qs = false;
312 } 289 }
313} 290}
@@ -788,13 +765,13 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
788} 765}
789 766
790/* 767/*
791 * Check for a quiescent state from the current CPU. When a task blocks, 768 * Check for a quiescent state from the current CPU, including voluntary
792 * the task is recorded in the corresponding CPU's rcu_node structure, 769 * context switches for Tasks RCU. When a task blocks, the task is
793 * which is checked elsewhere. 770 * recorded in the corresponding CPU's rcu_node structure, which is checked
794 * 771 * elsewhere, hence this function need only check for quiescent states
795 * Caller must disable hard irqs. 772 * related to the current CPU, not to those related to tasks.
796 */ 773 */
797static void rcu_flavor_check_callbacks(int user) 774static void rcu_flavor_sched_clock_irq(int user)
798{ 775{
799 struct task_struct *t = current; 776 struct task_struct *t = current;
800 777
@@ -825,54 +802,6 @@ static void rcu_flavor_check_callbacks(int user)
825 t->rcu_read_unlock_special.b.need_qs = true; 802 t->rcu_read_unlock_special.b.need_qs = true;
826} 803}
827 804
828/**
829 * synchronize_rcu - wait until a grace period has elapsed.
830 *
831 * Control will return to the caller some time after a full grace
832 * period has elapsed, in other words after all currently executing RCU
833 * read-side critical sections have completed. Note, however, that
834 * upon return from synchronize_rcu(), the caller might well be executing
835 * concurrently with new RCU read-side critical sections that began while
836 * synchronize_rcu() was waiting. RCU read-side critical sections are
837 * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
838 * In addition, regions of code across which interrupts, preemption, or
839 * softirqs have been disabled also serve as RCU read-side critical
840 * sections. This includes hardware interrupt handlers, softirq handlers,
841 * and NMI handlers.
842 *
843 * Note that this guarantee implies further memory-ordering guarantees.
844 * On systems with more than one CPU, when synchronize_rcu() returns,
845 * each CPU is guaranteed to have executed a full memory barrier since
846 * the end of its last RCU read-side critical section whose beginning
847 * preceded the call to synchronize_rcu(). In addition, each CPU having
848 * an RCU read-side critical section that extends beyond the return from
849 * synchronize_rcu() is guaranteed to have executed a full memory barrier
850 * after the beginning of synchronize_rcu() and before the beginning of
851 * that RCU read-side critical section. Note that these guarantees include
852 * CPUs that are offline, idle, or executing in user mode, as well as CPUs
853 * that are executing in the kernel.
854 *
855 * Furthermore, if CPU A invoked synchronize_rcu(), which returned
856 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
857 * to have executed a full memory barrier during the execution of
858 * synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but
859 * again only if the system has more than one CPU).
860 */
861void synchronize_rcu(void)
862{
863 RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
864 lock_is_held(&rcu_lock_map) ||
865 lock_is_held(&rcu_sched_lock_map),
866 "Illegal synchronize_rcu() in RCU read-side critical section");
867 if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
868 return;
869 if (rcu_gp_is_expedited())
870 synchronize_rcu_expedited();
871 else
872 wait_rcu_gp(call_rcu);
873}
874EXPORT_SYMBOL_GPL(synchronize_rcu);
875
876/* 805/*
877 * Check for a task exiting while in a preemptible-RCU read-side 806 * Check for a task exiting while in a preemptible-RCU read-side
878 * critical section, clean up if so. No need to issue warnings, 807 * critical section, clean up if so. No need to issue warnings,
@@ -1088,14 +1017,10 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
1088} 1017}
1089 1018
1090/* 1019/*
1091 * Check to see if this CPU is in a non-context-switch quiescent state 1020 * Check to see if this CPU is in a non-context-switch quiescent state,
1092 * (user mode or idle loop for rcu, non-softirq execution for rcu_bh). 1021 * namely user mode and idle loop.
1093 * Also schedule RCU core processing.
1094 *
1095 * This function must be called from hardirq context. It is normally
1096 * invoked from the scheduling-clock interrupt.
1097 */ 1022 */
1098static void rcu_flavor_check_callbacks(int user) 1023static void rcu_flavor_sched_clock_irq(int user)
1099{ 1024{
1100 if (user || rcu_is_cpu_rrupt_from_idle()) { 1025 if (user || rcu_is_cpu_rrupt_from_idle()) {
1101 1026
@@ -1115,22 +1040,6 @@ static void rcu_flavor_check_callbacks(int user)
1115 } 1040 }
1116} 1041}
1117 1042
1118/* PREEMPT=n implementation of synchronize_rcu(). */
1119void synchronize_rcu(void)
1120{
1121 RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
1122 lock_is_held(&rcu_lock_map) ||
1123 lock_is_held(&rcu_sched_lock_map),
1124 "Illegal synchronize_rcu() in RCU read-side critical section");
1125 if (rcu_blocking_is_gp())
1126 return;
1127 if (rcu_gp_is_expedited())
1128 synchronize_rcu_expedited();
1129 else
1130 wait_rcu_gp(call_rcu);
1131}
1132EXPORT_SYMBOL_GPL(synchronize_rcu);
1133
1134/* 1043/*
1135 * Because preemptible RCU does not exist, tasks cannot possibly exit 1044 * Because preemptible RCU does not exist, tasks cannot possibly exit
1136 * while in preemptible RCU read-side critical sections. 1045 * while in preemptible RCU read-side critical sections.
@@ -1307,11 +1216,11 @@ static void invoke_rcu_callbacks_kthread(void)
1307 unsigned long flags; 1216 unsigned long flags;
1308 1217
1309 local_irq_save(flags); 1218 local_irq_save(flags);
1310 __this_cpu_write(rcu_cpu_has_work, 1); 1219 __this_cpu_write(rcu_data.rcu_cpu_has_work, 1);
1311 if (__this_cpu_read(rcu_cpu_kthread_task) != NULL && 1220 if (__this_cpu_read(rcu_data.rcu_cpu_kthread_task) != NULL &&
1312 current != __this_cpu_read(rcu_cpu_kthread_task)) { 1221 current != __this_cpu_read(rcu_data.rcu_cpu_kthread_task)) {
1313 rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task), 1222 rcu_wake_cond(__this_cpu_read(rcu_data.rcu_cpu_kthread_task),
1314 __this_cpu_read(rcu_cpu_kthread_status)); 1223 __this_cpu_read(rcu_data.rcu_cpu_kthread_status));
1315 } 1224 }
1316 local_irq_restore(flags); 1225 local_irq_restore(flags);
1317} 1226}
@@ -1322,7 +1231,7 @@ static void invoke_rcu_callbacks_kthread(void)
1322 */ 1231 */
1323static bool rcu_is_callbacks_kthread(void) 1232static bool rcu_is_callbacks_kthread(void)
1324{ 1233{
1325 return __this_cpu_read(rcu_cpu_kthread_task) == current; 1234 return __this_cpu_read(rcu_data.rcu_cpu_kthread_task) == current;
1326} 1235}
1327 1236
1328#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000) 1237#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)
@@ -1369,11 +1278,6 @@ static int rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
1369 return 0; 1278 return 0;
1370} 1279}
1371 1280
1372static void rcu_kthread_do_work(void)
1373{
1374 rcu_do_batch(this_cpu_ptr(&rcu_data));
1375}
1376
1377static void rcu_cpu_kthread_setup(unsigned int cpu) 1281static void rcu_cpu_kthread_setup(unsigned int cpu)
1378{ 1282{
1379 struct sched_param sp; 1283 struct sched_param sp;
@@ -1384,12 +1288,12 @@ static void rcu_cpu_kthread_setup(unsigned int cpu)
1384 1288
1385static void rcu_cpu_kthread_park(unsigned int cpu) 1289static void rcu_cpu_kthread_park(unsigned int cpu)
1386{ 1290{
1387 per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU; 1291 per_cpu(rcu_data.rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
1388} 1292}
1389 1293
1390static int rcu_cpu_kthread_should_run(unsigned int cpu) 1294static int rcu_cpu_kthread_should_run(unsigned int cpu)
1391{ 1295{
1392 return __this_cpu_read(rcu_cpu_has_work); 1296 return __this_cpu_read(rcu_data.rcu_cpu_has_work);
1393} 1297}
1394 1298
1395/* 1299/*
@@ -1399,21 +1303,20 @@ static int rcu_cpu_kthread_should_run(unsigned int cpu)
1399 */ 1303 */
1400static void rcu_cpu_kthread(unsigned int cpu) 1304static void rcu_cpu_kthread(unsigned int cpu)
1401{ 1305{
1402 unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status); 1306 unsigned int *statusp = this_cpu_ptr(&rcu_data.rcu_cpu_kthread_status);
1403 char work, *workp = this_cpu_ptr(&rcu_cpu_has_work); 1307 char work, *workp = this_cpu_ptr(&rcu_data.rcu_cpu_has_work);
1404 int spincnt; 1308 int spincnt;
1405 1309
1406 for (spincnt = 0; spincnt < 10; spincnt++) { 1310 for (spincnt = 0; spincnt < 10; spincnt++) {
1407 trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait")); 1311 trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait"));
1408 local_bh_disable(); 1312 local_bh_disable();
1409 *statusp = RCU_KTHREAD_RUNNING; 1313 *statusp = RCU_KTHREAD_RUNNING;
1410 this_cpu_inc(rcu_cpu_kthread_loops);
1411 local_irq_disable(); 1314 local_irq_disable();
1412 work = *workp; 1315 work = *workp;
1413 *workp = 0; 1316 *workp = 0;
1414 local_irq_enable(); 1317 local_irq_enable();
1415 if (work) 1318 if (work)
1416 rcu_kthread_do_work(); 1319 rcu_do_batch(this_cpu_ptr(&rcu_data));
1417 local_bh_enable(); 1320 local_bh_enable();
1418 if (*workp == 0) { 1321 if (*workp == 0) {
1419 trace_rcu_utilization(TPS("End CPU kthread@rcu_wait")); 1322 trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
@@ -1459,7 +1362,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
1459} 1362}
1460 1363
1461static struct smp_hotplug_thread rcu_cpu_thread_spec = { 1364static struct smp_hotplug_thread rcu_cpu_thread_spec = {
1462 .store = &rcu_cpu_kthread_task, 1365 .store = &rcu_data.rcu_cpu_kthread_task,
1463 .thread_should_run = rcu_cpu_kthread_should_run, 1366 .thread_should_run = rcu_cpu_kthread_should_run,
1464 .thread_fn = rcu_cpu_kthread, 1367 .thread_fn = rcu_cpu_kthread,
1465 .thread_comm = "rcuc/%u", 1368 .thread_comm = "rcuc/%u",
@@ -1476,7 +1379,7 @@ static void __init rcu_spawn_boost_kthreads(void)
1476 int cpu; 1379 int cpu;
1477 1380
1478 for_each_possible_cpu(cpu) 1381 for_each_possible_cpu(cpu)
1479 per_cpu(rcu_cpu_has_work, cpu) = 0; 1382 per_cpu(rcu_data.rcu_cpu_has_work, cpu) = 0;
1480 if (WARN_ONCE(smpboot_register_percpu_thread(&rcu_cpu_thread_spec), "%s: Could not start rcub kthread, OOM is now expected behavior\n", __func__)) 1383 if (WARN_ONCE(smpboot_register_percpu_thread(&rcu_cpu_thread_spec), "%s: Could not start rcub kthread, OOM is now expected behavior\n", __func__))
1481 return; 1384 return;
1482 rcu_for_each_leaf_node(rnp) 1385 rcu_for_each_leaf_node(rnp)
@@ -1543,7 +1446,7 @@ static void rcu_prepare_kthreads(int cpu)
1543int rcu_needs_cpu(u64 basemono, u64 *nextevt) 1446int rcu_needs_cpu(u64 basemono, u64 *nextevt)
1544{ 1447{
1545 *nextevt = KTIME_MAX; 1448 *nextevt = KTIME_MAX;
1546 return rcu_cpu_has_callbacks(NULL); 1449 return !rcu_segcblist_empty(&this_cpu_ptr(&rcu_data)->cblist);
1547} 1450}
1548 1451
1549/* 1452/*
@@ -1562,14 +1465,6 @@ static void rcu_prepare_for_idle(void)
1562{ 1465{
1563} 1466}
1564 1467
1565/*
1566 * Don't bother keeping a running count of the number of RCU callbacks
1567 * posted because CONFIG_RCU_FAST_NO_HZ=n.
1568 */
1569static void rcu_idle_count_callbacks_posted(void)
1570{
1571}
1572
1573#else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */ 1468#else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */
1574 1469
1575/* 1470/*
@@ -1652,11 +1547,8 @@ int rcu_needs_cpu(u64 basemono, u64 *nextevt)
1652 1547
1653 lockdep_assert_irqs_disabled(); 1548 lockdep_assert_irqs_disabled();
1654 1549
1655 /* Snapshot to detect later posting of non-lazy callback. */
1656 rdp->nonlazy_posted_snap = rdp->nonlazy_posted;
1657
1658 /* If no callbacks, RCU doesn't need the CPU. */ 1550 /* If no callbacks, RCU doesn't need the CPU. */
1659 if (!rcu_cpu_has_callbacks(&rdp->all_lazy)) { 1551 if (rcu_segcblist_empty(&rdp->cblist)) {
1660 *nextevt = KTIME_MAX; 1552 *nextevt = KTIME_MAX;
1661 return 0; 1553 return 0;
1662 } 1554 }
@@ -1670,11 +1562,12 @@ int rcu_needs_cpu(u64 basemono, u64 *nextevt)
1670 rdp->last_accelerate = jiffies; 1562 rdp->last_accelerate = jiffies;
1671 1563
1672 /* Request timer delay depending on laziness, and round. */ 1564 /* Request timer delay depending on laziness, and round. */
1673 if (!rdp->all_lazy) { 1565 rdp->all_lazy = !rcu_segcblist_n_nonlazy_cbs(&rdp->cblist);
1566 if (rdp->all_lazy) {
1567 dj = round_jiffies(rcu_idle_lazy_gp_delay + jiffies) - jiffies;
1568 } else {
1674 dj = round_up(rcu_idle_gp_delay + jiffies, 1569 dj = round_up(rcu_idle_gp_delay + jiffies,
1675 rcu_idle_gp_delay) - jiffies; 1570 rcu_idle_gp_delay) - jiffies;
1676 } else {
1677 dj = round_jiffies(rcu_idle_lazy_gp_delay + jiffies) - jiffies;
1678 } 1571 }
1679 *nextevt = basemono + dj * TICK_NSEC; 1572 *nextevt = basemono + dj * TICK_NSEC;
1680 return 0; 1573 return 0;
@@ -1704,7 +1597,7 @@ static void rcu_prepare_for_idle(void)
1704 /* Handle nohz enablement switches conservatively. */ 1597 /* Handle nohz enablement switches conservatively. */
1705 tne = READ_ONCE(tick_nohz_active); 1598 tne = READ_ONCE(tick_nohz_active);
1706 if (tne != rdp->tick_nohz_enabled_snap) { 1599 if (tne != rdp->tick_nohz_enabled_snap) {
1707 if (rcu_cpu_has_callbacks(NULL)) 1600 if (!rcu_segcblist_empty(&rdp->cblist))
1708 invoke_rcu_core(); /* force nohz to see update. */ 1601 invoke_rcu_core(); /* force nohz to see update. */
1709 rdp->tick_nohz_enabled_snap = tne; 1602 rdp->tick_nohz_enabled_snap = tne;
1710 return; 1603 return;
@@ -1717,10 +1610,8 @@ static void rcu_prepare_for_idle(void)
1717 * callbacks, invoke RCU core for the side-effect of recalculating 1610 * callbacks, invoke RCU core for the side-effect of recalculating
1718 * idle duration on re-entry to idle. 1611 * idle duration on re-entry to idle.
1719 */ 1612 */
1720 if (rdp->all_lazy && 1613 if (rdp->all_lazy && rcu_segcblist_n_nonlazy_cbs(&rdp->cblist)) {
1721 rdp->nonlazy_posted != rdp->nonlazy_posted_snap) {
1722 rdp->all_lazy = false; 1614 rdp->all_lazy = false;
1723 rdp->nonlazy_posted_snap = rdp->nonlazy_posted;
1724 invoke_rcu_core(); 1615 invoke_rcu_core();
1725 return; 1616 return;
1726 } 1617 }
@@ -1756,19 +1647,6 @@ static void rcu_cleanup_after_idle(void)
1756 invoke_rcu_core(); 1647 invoke_rcu_core();
1757} 1648}
1758 1649
1759/*
1760 * Keep a running count of the number of non-lazy callbacks posted
1761 * on this CPU. This running counter (which is never decremented) allows
1762 * rcu_prepare_for_idle() to detect when something out of the idle loop
1763 * posts a callback, even if an equal number of callbacks are invoked.
1764 * Of course, callbacks should only be posted from within a trace event
1765 * designed to be called from idle or from within RCU_NONIDLE().
1766 */
1767static void rcu_idle_count_callbacks_posted(void)
1768{
1769 __this_cpu_add(rcu_data.nonlazy_posted, 1);
1770}
1771
1772#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */ 1650#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
1773 1651
1774#ifdef CONFIG_RCU_FAST_NO_HZ 1652#ifdef CONFIG_RCU_FAST_NO_HZ
@@ -1776,13 +1654,12 @@ static void rcu_idle_count_callbacks_posted(void)
1776static void print_cpu_stall_fast_no_hz(char *cp, int cpu) 1654static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
1777{ 1655{
1778 struct rcu_data *rdp = &per_cpu(rcu_data, cpu); 1656 struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
1779 unsigned long nlpd = rdp->nonlazy_posted - rdp->nonlazy_posted_snap;
1780 1657
1781 sprintf(cp, "last_accelerate: %04lx/%04lx, nonlazy_posted: %ld, %c%c", 1658 sprintf(cp, "last_accelerate: %04lx/%04lx, Nonlazy posted: %c%c%c",
1782 rdp->last_accelerate & 0xffff, jiffies & 0xffff, 1659 rdp->last_accelerate & 0xffff, jiffies & 0xffff,
1783 ulong2long(nlpd), 1660 ".l"[rdp->all_lazy],
1784 rdp->all_lazy ? 'L' : '.', 1661 ".L"[!rcu_segcblist_n_nonlazy_cbs(&rdp->cblist)],
1785 rdp->tick_nohz_enabled_snap ? '.' : 'D'); 1662 ".D"[!rdp->tick_nohz_enabled_snap]);
1786} 1663}
1787 1664
1788#else /* #ifdef CONFIG_RCU_FAST_NO_HZ */ 1665#else /* #ifdef CONFIG_RCU_FAST_NO_HZ */
@@ -1868,22 +1745,24 @@ static void zero_cpu_stall_ticks(struct rcu_data *rdp)
1868 1745
1869/* 1746/*
1870 * Offload callback processing from the boot-time-specified set of CPUs 1747 * Offload callback processing from the boot-time-specified set of CPUs
1871 * specified by rcu_nocb_mask. For each CPU in the set, there is a 1748 * specified by rcu_nocb_mask. For the CPUs in the set, there are kthreads
1872 * kthread created that pulls the callbacks from the corresponding CPU, 1749 * created that pull the callbacks from the corresponding CPU, wait for
1873 * waits for a grace period to elapse, and invokes the callbacks. 1750 * a grace period to elapse, and invoke the callbacks. These kthreads
1874 * The no-CBs CPUs do a wake_up() on their kthread when they insert 1751 * are organized into leaders, which manage incoming callbacks, wait for
1875 * a callback into any empty list, unless the rcu_nocb_poll boot parameter 1752 * grace periods, and awaken followers, and the followers, which only
1876 * has been specified, in which case each kthread actively polls its 1753 * invoke callbacks. Each leader is its own follower. The no-CBs CPUs
1877 * CPU. (Which isn't so great for energy efficiency, but which does 1754 * do a wake_up() on their kthread when they insert a callback into any
1878 * reduce RCU's overhead on that CPU.) 1755 * empty list, unless the rcu_nocb_poll boot parameter has been specified,
1756 * in which case each kthread actively polls its CPU. (Which isn't so great
1757 * for energy efficiency, but which does reduce RCU's overhead on that CPU.)
1879 * 1758 *
1880 * This is intended to be used in conjunction with Frederic Weisbecker's 1759 * This is intended to be used in conjunction with Frederic Weisbecker's
1881 * adaptive-idle work, which would seriously reduce OS jitter on CPUs 1760 * adaptive-idle work, which would seriously reduce OS jitter on CPUs
1882 * running CPU-bound user-mode computations. 1761 * running CPU-bound user-mode computations.
1883 * 1762 *
1884 * Offloading of callback processing could also in theory be used as 1763 * Offloading of callbacks can also be used as an energy-efficiency
1885 * an energy-efficiency measure because CPUs with no RCU callbacks 1764 * measure because CPUs with no RCU callbacks queued are more aggressive
1886 * queued are more aggressive about entering dyntick-idle mode. 1765 * about entering dyntick-idle mode.
1887 */ 1766 */
1888 1767
1889 1768
@@ -1987,10 +1866,7 @@ static void wake_nocb_leader_defer(struct rcu_data *rdp, int waketype,
1987 raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags); 1866 raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
1988} 1867}
1989 1868
1990/* 1869/* Does rcu_barrier need to queue an RCU callback on the specified CPU? */
1991 * Does the specified CPU need an RCU callback for this invocation
1992 * of rcu_barrier()?
1993 */
1994static bool rcu_nocb_cpu_needs_barrier(int cpu) 1870static bool rcu_nocb_cpu_needs_barrier(int cpu)
1995{ 1871{
1996 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 1872 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
@@ -2006,8 +1882,8 @@ static bool rcu_nocb_cpu_needs_barrier(int cpu)
2006 * callbacks would be posted. In the worst case, the first 1882 * callbacks would be posted. In the worst case, the first
2007 * barrier in rcu_barrier() suffices (but the caller cannot 1883 * barrier in rcu_barrier() suffices (but the caller cannot
2008 * necessarily rely on this, not a substitute for the caller 1884 * necessarily rely on this, not a substitute for the caller
2009 * getting the concurrency design right!). There must also be 1885 * getting the concurrency design right!). There must also be a
2010 * a barrier between the following load an posting of a callback 1886 * barrier between the following load and posting of a callback
2011 * (if a callback is in fact needed). This is associated with an 1887 * (if a callback is in fact needed). This is associated with an
2012 * atomic_inc() in the caller. 1888 * atomic_inc() in the caller.
2013 */ 1889 */
@@ -2517,9 +2393,9 @@ static void rcu_spawn_one_nocb_kthread(int cpu)
2517 2393
2518/* 2394/*
2519 * If the specified CPU is a no-CBs CPU that does not already have its 2395 * If the specified CPU is a no-CBs CPU that does not already have its
2520 * rcuo kthreads, spawn them. 2396 * rcuo kthread, spawn it.
2521 */ 2397 */
2522static void rcu_spawn_all_nocb_kthreads(int cpu) 2398static void rcu_spawn_cpu_nocb_kthread(int cpu)
2523{ 2399{
2524 if (rcu_scheduler_fully_active) 2400 if (rcu_scheduler_fully_active)
2525 rcu_spawn_one_nocb_kthread(cpu); 2401 rcu_spawn_one_nocb_kthread(cpu);
@@ -2536,7 +2412,7 @@ static void __init rcu_spawn_nocb_kthreads(void)
2536 int cpu; 2412 int cpu;
2537 2413
2538 for_each_online_cpu(cpu) 2414 for_each_online_cpu(cpu)
2539 rcu_spawn_all_nocb_kthreads(cpu); 2415 rcu_spawn_cpu_nocb_kthread(cpu);
2540} 2416}
2541 2417
2542/* How many follower CPU IDs per leader? Default of -1 for sqrt(nr_cpu_ids). */ 2418/* How many follower CPU IDs per leader? Default of -1 for sqrt(nr_cpu_ids). */
@@ -2670,7 +2546,7 @@ static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
2670{ 2546{
2671} 2547}
2672 2548
2673static void rcu_spawn_all_nocb_kthreads(int cpu) 2549static void rcu_spawn_cpu_nocb_kthread(int cpu)
2674{ 2550{
2675} 2551}
2676 2552
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index 1971869c4072..e3c6395c9b4c 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -1,26 +1,13 @@
1// SPDX-License-Identifier: GPL-2.0+
1/* 2/*
2 * Read-Copy Update mechanism for mutual exclusion 3 * Read-Copy Update mechanism for mutual exclusion
3 * 4 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, you can access it online at
16 * http://www.gnu.org/licenses/gpl-2.0.html.
17 *
18 * Copyright IBM Corporation, 2001 5 * Copyright IBM Corporation, 2001
19 * 6 *
20 * Authors: Dipankar Sarma <dipankar@in.ibm.com> 7 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
21 * Manfred Spraul <manfred@colorfullife.com> 8 * Manfred Spraul <manfred@colorfullife.com>
22 * 9 *
23 * Based on the original work by Paul McKenney <paulmck@us.ibm.com> 10 * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
24 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. 11 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
25 * Papers: 12 * Papers:
26 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf 13 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
diff --git a/kernel/sched/cpufreq.c b/kernel/sched/cpufreq.c
index 22bd8980f32f..835671f0f917 100644
--- a/kernel/sched/cpufreq.c
+++ b/kernel/sched/cpufreq.c
@@ -48,8 +48,8 @@ EXPORT_SYMBOL_GPL(cpufreq_add_update_util_hook);
48 * 48 *
49 * Clear the update_util_data pointer for the given CPU. 49 * Clear the update_util_data pointer for the given CPU.
50 * 50 *
51 * Callers must use RCU-sched callbacks to free any memory that might be 51 * Callers must use RCU callbacks to free any memory that might be
52 * accessed via the old update_util_data pointer or invoke synchronize_sched() 52 * accessed via the old update_util_data pointer or invoke synchronize_rcu()
53 * right after this function to avoid use-after-free. 53 * right after this function to avoid use-after-free.
54 */ 54 */
55void cpufreq_remove_update_util_hook(int cpu) 55void cpufreq_remove_update_util_hook(int cpu)
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 033ec7c45f13..2efe629425be 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -859,7 +859,7 @@ static void sugov_stop(struct cpufreq_policy *policy)
859 for_each_cpu(cpu, policy->cpus) 859 for_each_cpu(cpu, policy->cpus)
860 cpufreq_remove_update_util_hook(cpu); 860 cpufreq_remove_update_util_hook(cpu);
861 861
862 synchronize_sched(); 862 synchronize_rcu();
863 863
864 if (!policy->fast_switch_enabled) { 864 if (!policy->fast_switch_enabled) {
865 irq_work_sync(&sg_policy->irq_work); 865 irq_work_sync(&sg_policy->irq_work);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index d04530bf251f..6665b9c02e2f 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1260,7 +1260,7 @@ extern void sched_ttwu_pending(void);
1260 1260
1261/* 1261/*
1262 * The domain tree (rq->sd) is protected by RCU's quiescent state transition. 1262 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
1263 * See detach_destroy_domains: synchronize_sched for details. 1263 * See destroy_sched_domains: call_rcu for details.
1264 * 1264 *
1265 * The domain tree of any CPU may only be accessed from within 1265 * The domain tree of any CPU may only be accessed from within
1266 * preempt-disabled sections. 1266 * preempt-disabled sections.
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 3f35ba1d8fde..7d905f55e7fa 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -442,7 +442,7 @@ void rq_attach_root(struct rq *rq, struct root_domain *rd)
442 raw_spin_unlock_irqrestore(&rq->lock, flags); 442 raw_spin_unlock_irqrestore(&rq->lock, flags);
443 443
444 if (old_rd) 444 if (old_rd)
445 call_rcu_sched(&old_rd->rcu, free_rootdomain); 445 call_rcu(&old_rd->rcu, free_rootdomain);
446} 446}
447 447
448void sched_get_rd(struct root_domain *rd) 448void sched_get_rd(struct root_domain *rd)
@@ -455,7 +455,7 @@ void sched_put_rd(struct root_domain *rd)
455 if (!atomic_dec_and_test(&rd->refcount)) 455 if (!atomic_dec_and_test(&rd->refcount))
456 return; 456 return;
457 457
458 call_rcu_sched(&rd->rcu, free_rootdomain); 458 call_rcu(&rd->rcu, free_rootdomain);
459} 459}
460 460
461static int init_rootdomain(struct root_domain *rd) 461static int init_rootdomain(struct root_domain *rd)
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 444156debfa0..6eb7cc4b6d52 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1632,7 +1632,7 @@ void update_process_times(int user_tick)
1632 /* Note: this timer irq context must be accounted for as well. */ 1632 /* Note: this timer irq context must be accounted for as well. */
1633 account_process_tick(p, user_tick); 1633 account_process_tick(p, user_tick);
1634 run_local_timers(); 1634 run_local_timers();
1635 rcu_check_callbacks(user_tick); 1635 rcu_sched_clock_irq(user_tick);
1636#ifdef CONFIG_IRQ_WORK 1636#ifdef CONFIG_IRQ_WORK
1637 if (in_irq()) 1637 if (in_irq())
1638 irq_work_tick(); 1638 irq_work_tick();
diff --git a/kernel/torture.c b/kernel/torture.c
index bbf6d473e50c..8faa1a9aaeb9 100644
--- a/kernel/torture.c
+++ b/kernel/torture.c
@@ -1,23 +1,10 @@
1// SPDX-License-Identifier: GPL-2.0+
1/* 2/*
2 * Common functions for in-kernel torture tests. 3 * Common functions for in-kernel torture tests.
3 * 4 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, you can access it online at
16 * http://www.gnu.org/licenses/gpl-2.0.html.
17 *
18 * Copyright (C) IBM Corporation, 2014 5 * Copyright (C) IBM Corporation, 2014
19 * 6 *
20 * Author: Paul E. McKenney <paulmck@us.ibm.com> 7 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
21 * Based on kernel/rcu/torture.c. 8 * Based on kernel/rcu/torture.c.
22 */ 9 */
23 10
@@ -53,7 +40,7 @@
53#include "rcu/rcu.h" 40#include "rcu/rcu.h"
54 41
55MODULE_LICENSE("GPL"); 42MODULE_LICENSE("GPL");
56MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com>"); 43MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");
57 44
58static char *torture_type; 45static char *torture_type;
59static int verbose; 46static int verbose;
@@ -75,6 +62,7 @@ static DEFINE_MUTEX(fullstop_mutex);
75static struct task_struct *onoff_task; 62static struct task_struct *onoff_task;
76static long onoff_holdoff; 63static long onoff_holdoff;
77static long onoff_interval; 64static long onoff_interval;
65static torture_ofl_func *onoff_f;
78static long n_offline_attempts; 66static long n_offline_attempts;
79static long n_offline_successes; 67static long n_offline_successes;
80static unsigned long sum_offline; 68static unsigned long sum_offline;
@@ -118,6 +106,8 @@ bool torture_offline(int cpu, long *n_offl_attempts, long *n_offl_successes,
118 pr_alert("%s" TORTURE_FLAG 106 pr_alert("%s" TORTURE_FLAG
119 "torture_onoff task: offlined %d\n", 107 "torture_onoff task: offlined %d\n",
120 torture_type, cpu); 108 torture_type, cpu);
109 if (onoff_f)
110 onoff_f();
121 (*n_offl_successes)++; 111 (*n_offl_successes)++;
122 delta = jiffies - starttime; 112 delta = jiffies - starttime;
123 *sum_offl += delta; 113 *sum_offl += delta;
@@ -243,11 +233,12 @@ stop:
243/* 233/*
244 * Initiate online-offline handling. 234 * Initiate online-offline handling.
245 */ 235 */
246int torture_onoff_init(long ooholdoff, long oointerval) 236int torture_onoff_init(long ooholdoff, long oointerval, torture_ofl_func *f)
247{ 237{
248#ifdef CONFIG_HOTPLUG_CPU 238#ifdef CONFIG_HOTPLUG_CPU
249 onoff_holdoff = ooholdoff; 239 onoff_holdoff = ooholdoff;
250 onoff_interval = oointerval; 240 onoff_interval = oointerval;
241 onoff_f = f;
251 if (onoff_interval <= 0) 242 if (onoff_interval <= 0)
252 return 0; 243 return 0;
253 return torture_create_kthread(torture_onoff, NULL, onoff_task); 244 return torture_create_kthread(torture_onoff, NULL, onoff_task);
diff --git a/tools/testing/selftests/rcutorture/bin/nolibc.h b/tools/include/nolibc/nolibc.h
index f98f5b92d3eb..1708e9f9f8aa 100644
--- a/tools/testing/selftests/rcutorture/bin/nolibc.h
+++ b/tools/include/nolibc/nolibc.h
@@ -3,7 +3,85 @@
3 * Copyright (C) 2017-2018 Willy Tarreau <w@1wt.eu> 3 * Copyright (C) 2017-2018 Willy Tarreau <w@1wt.eu>
4 */ 4 */
5 5
6/* some archs (at least aarch64) don't expose the regular syscalls anymore by 6/*
7 * This file is designed to be used as a libc alternative for minimal programs
8 * with very limited requirements. It consists of a small number of syscall and
9 * type definitions, and the minimal startup code needed to call main().
10 * All syscalls are declared as static functions so that they can be optimized
11 * away by the compiler when not used.
12 *
13 * Syscalls are split into 3 levels:
14 * - The lower level is the arch-specific syscall() definition, consisting in
15 * assembly code in compound expressions. These are called my_syscall0() to
16 * my_syscall6() depending on the number of arguments. The MIPS
17 * implementation is limited to 5 arguments. All input arguments are cast
18 * to a long stored in a register. These expressions always return the
19 * syscall's return value as a signed long value which is often either a
20 * pointer or the negated errno value.
21 *
22 * - The second level is mostly architecture-independent. It is made of
23 * static functions called sys_<name>() which rely on my_syscallN()
24 * depending on the syscall definition. These functions are responsible
25 * for exposing the appropriate types for the syscall arguments (int,
26 * pointers, etc) and for setting the appropriate return type (often int).
27 * A few of them are architecture-specific because the syscalls are not all
28 * mapped exactly the same among architectures. For example, some archs do
29 * not implement select() and need pselect6() instead, so the sys_select()
30 * function will have to abstract this.
31 *
32 * - The third level is the libc call definition. It exposes the lower raw
33 * sys_<name>() calls in a way that looks like what a libc usually does,
34 * takes care of specific input values, and of setting errno upon error.
35 * There can be minor variations compared to standard libc calls. For
36 * example the open() call always takes 3 args here.
37 *
38 * The errno variable is declared static and unused. This way it can be
39 * optimized away if not used. However this means that a program made of
40 * multiple C files may observe different errno values (one per C file). For
41 * the type of programs this project targets it usually is not a problem. The
42 * resulting program may even be reduced by defining the NOLIBC_IGNORE_ERRNO
43 * macro, in which case the errno value will never be assigned.
44 *
45 * Some stdint-like integer types are defined. These are valid on all currently
46 * supported architectures, because signs are enforced, ints are assumed to be
47 * 32 bits, longs the size of a pointer and long long 64 bits. If more
48 * architectures have to be supported, this may need to be adapted.
49 *
50 * Some macro definitions like the O_* values passed to open(), and some
51 * structures like the sys_stat struct depend on the architecture.
52 *
53 * The definitions start with the architecture-specific parts, which are picked
54 * based on what the compiler knows about the target architecture, and are
55 * completed with the generic code. Since it is the compiler which sets the
56 * target architecture, cross-compiling normally works out of the box without
57 * having to specify anything.
58 *
59 * Finally some very common libc-level functions are provided. It is the case
60 * for a few functions usually found in string.h, ctype.h, or stdlib.h. Nothing
61 * is currently provided regarding stdio emulation.
62 *
63 * The macro NOLIBC is always defined, so that it is possible for a program to
64 * check this macro to know if it is being built against and decide to disable
65 * some features or simply not to include some standard libc files.
66 *
67 * Ideally this file should be split in multiple files for easier long term
68 * maintenance, but provided as a single file as it is now, it's quite
69 * convenient to use. Maybe some variations involving a set of includes at the
70 * top could work.
71 *
72 * A simple static executable may be built this way :
73 * $ gcc -fno-asynchronous-unwind-tables -fno-ident -s -Os -nostdlib \
74 * -static -include nolibc.h -lgcc -o hello hello.c
75 *
76 * A very useful calling convention table may be found here :
77 * http://man7.org/linux/man-pages/man2/syscall.2.html
78 *
79 * This doc is quite convenient though not necessarily up to date :
80 * https://w3challs.com/syscalls/
81 *
82 */
83
84/* Some archs (at least aarch64) don't expose the regular syscalls anymore by
7 * default, either because they have an "_at" replacement, or because there are 85 * default, either because they have an "_at" replacement, or because there are
8 * more modern alternatives. For now we'd rather still use them. 86 * more modern alternatives. For now we'd rather still use them.
9 */ 87 */
@@ -19,18 +97,6 @@
19 97
20#define NOLIBC 98#define NOLIBC
21 99
22/* Build a static executable this way :
23 * $ gcc -fno-asynchronous-unwind-tables -fno-ident -s -Os -nostdlib \
24 * -static -include nolibc.h -lgcc -o hello hello.c
25 *
26 * Useful calling convention table found here :
27 * http://man7.org/linux/man-pages/man2/syscall.2.html
28 *
29 * This doc is even better :
30 * https://w3challs.com/syscalls/
31 */
32
33
34/* this way it will be removed if unused */ 100/* this way it will be removed if unused */
35static int errno; 101static int errno;
36 102
@@ -81,9 +147,9 @@ typedef signed long time_t;
81 147
82/* for poll() */ 148/* for poll() */
83struct pollfd { 149struct pollfd {
84 int fd; 150 int fd;
85 short int events; 151 short int events;
86 short int revents; 152 short int revents;
87}; 153};
88 154
89/* for select() */ 155/* for select() */
@@ -239,7 +305,7 @@ struct stat {
239 "syscall\n" \ 305 "syscall\n" \
240 : "=a" (_ret) \ 306 : "=a" (_ret) \
241 : "0"(_num) \ 307 : "0"(_num) \
242 : "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \ 308 : "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \
243 ); \ 309 ); \
244 _ret; \ 310 _ret; \
245}) 311})
@@ -255,7 +321,7 @@ struct stat {
255 : "=a" (_ret) \ 321 : "=a" (_ret) \
256 : "r"(_arg1), \ 322 : "r"(_arg1), \
257 "0"(_num) \ 323 "0"(_num) \
258 : "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \ 324 : "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \
259 ); \ 325 ); \
260 _ret; \ 326 _ret; \
261}) 327})
@@ -272,7 +338,7 @@ struct stat {
272 : "=a" (_ret) \ 338 : "=a" (_ret) \
273 : "r"(_arg1), "r"(_arg2), \ 339 : "r"(_arg1), "r"(_arg2), \
274 "0"(_num) \ 340 "0"(_num) \
275 : "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \ 341 : "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \
276 ); \ 342 ); \
277 _ret; \ 343 _ret; \
278}) 344})
@@ -290,7 +356,7 @@ struct stat {
290 : "=a" (_ret) \ 356 : "=a" (_ret) \
291 : "r"(_arg1), "r"(_arg2), "r"(_arg3), \ 357 : "r"(_arg1), "r"(_arg2), "r"(_arg3), \
292 "0"(_num) \ 358 "0"(_num) \
293 : "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \ 359 : "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \
294 ); \ 360 ); \
295 _ret; \ 361 _ret; \
296}) 362})
@@ -1006,7 +1072,7 @@ struct sys_stat_struct {
1006 : "=r"(_num), "=r"(_arg4) \ 1072 : "=r"(_num), "=r"(_arg4) \
1007 : "r"(_num) \ 1073 : "r"(_num) \
1008 : "memory", "cc", "at", "v1", "hi", "lo", \ 1074 : "memory", "cc", "at", "v1", "hi", "lo", \
1009 \ 1075 "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9" \
1010 ); \ 1076 ); \
1011 _arg4 ? -_num : _num; \ 1077 _arg4 ? -_num : _num; \
1012}) 1078})
@@ -1025,7 +1091,7 @@ struct sys_stat_struct {
1025 : "0"(_num), \ 1091 : "0"(_num), \
1026 "r"(_arg1) \ 1092 "r"(_arg1) \
1027 : "memory", "cc", "at", "v1", "hi", "lo", \ 1093 : "memory", "cc", "at", "v1", "hi", "lo", \
1028 \ 1094 "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9" \
1029 ); \ 1095 ); \
1030 _arg4 ? -_num : _num; \ 1096 _arg4 ? -_num : _num; \
1031}) 1097})
@@ -1045,7 +1111,7 @@ struct sys_stat_struct {
1045 : "0"(_num), \ 1111 : "0"(_num), \
1046 "r"(_arg1), "r"(_arg2) \ 1112 "r"(_arg1), "r"(_arg2) \
1047 : "memory", "cc", "at", "v1", "hi", "lo", \ 1113 : "memory", "cc", "at", "v1", "hi", "lo", \
1048 \ 1114 "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9" \
1049 ); \ 1115 ); \
1050 _arg4 ? -_num : _num; \ 1116 _arg4 ? -_num : _num; \
1051}) 1117})
@@ -1066,7 +1132,7 @@ struct sys_stat_struct {
1066 : "0"(_num), \ 1132 : "0"(_num), \
1067 "r"(_arg1), "r"(_arg2), "r"(_arg3) \ 1133 "r"(_arg1), "r"(_arg2), "r"(_arg3) \
1068 : "memory", "cc", "at", "v1", "hi", "lo", \ 1134 : "memory", "cc", "at", "v1", "hi", "lo", \
1069 \ 1135 "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9" \
1070 ); \ 1136 ); \
1071 _arg4 ? -_num : _num; \ 1137 _arg4 ? -_num : _num; \
1072}) 1138})
@@ -1087,7 +1153,7 @@ struct sys_stat_struct {
1087 : "0"(_num), \ 1153 : "0"(_num), \
1088 "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4) \ 1154 "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4) \
1089 : "memory", "cc", "at", "v1", "hi", "lo", \ 1155 : "memory", "cc", "at", "v1", "hi", "lo", \
1090 \ 1156 "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9" \
1091 ); \ 1157 ); \
1092 _arg4 ? -_num : _num; \ 1158 _arg4 ? -_num : _num; \
1093}) 1159})
@@ -1110,7 +1176,7 @@ struct sys_stat_struct {
1110 : "0"(_num), \ 1176 : "0"(_num), \
1111 "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5) \ 1177 "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5) \
1112 : "memory", "cc", "at", "v1", "hi", "lo", \ 1178 : "memory", "cc", "at", "v1", "hi", "lo", \
1113 \ 1179 "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9" \
1114 ); \ 1180 ); \
1115 _arg4 ? -_num : _num; \ 1181 _arg4 ? -_num : _num; \
1116}) 1182})
diff --git a/tools/memory-model/.gitignore b/tools/memory-model/.gitignore
new file mode 100644
index 000000000000..b1d34c52f3c3
--- /dev/null
+++ b/tools/memory-model/.gitignore
@@ -0,0 +1 @@
litmus
diff --git a/tools/memory-model/README b/tools/memory-model/README
index acf9077cffaa..0f2c366518c6 100644
--- a/tools/memory-model/README
+++ b/tools/memory-model/README
@@ -156,6 +156,8 @@ lock.cat
156README 156README
157 This file. 157 This file.
158 158
159scripts Various scripts, see scripts/README.
160
159 161
160=========== 162===========
161LIMITATIONS 163LIMITATIONS
diff --git a/tools/memory-model/linux-kernel.bell b/tools/memory-model/linux-kernel.bell
index b84fb2f67109..796513362c05 100644
--- a/tools/memory-model/linux-kernel.bell
+++ b/tools/memory-model/linux-kernel.bell
@@ -29,7 +29,8 @@ enum Barriers = 'wmb (*smp_wmb*) ||
29 'sync-rcu (*synchronize_rcu*) || 29 'sync-rcu (*synchronize_rcu*) ||
30 'before-atomic (*smp_mb__before_atomic*) || 30 'before-atomic (*smp_mb__before_atomic*) ||
31 'after-atomic (*smp_mb__after_atomic*) || 31 'after-atomic (*smp_mb__after_atomic*) ||
32 'after-spinlock (*smp_mb__after_spinlock*) 32 'after-spinlock (*smp_mb__after_spinlock*) ||
33 'after-unlock-lock (*smp_mb__after_unlock_lock*)
33instructions F[Barriers] 34instructions F[Barriers]
34 35
35(* Compute matching pairs of nested Rcu-lock and Rcu-unlock *) 36(* Compute matching pairs of nested Rcu-lock and Rcu-unlock *)
diff --git a/tools/memory-model/linux-kernel.cat b/tools/memory-model/linux-kernel.cat
index 882fc33274ac..8f23c74a96fd 100644
--- a/tools/memory-model/linux-kernel.cat
+++ b/tools/memory-model/linux-kernel.cat
@@ -30,7 +30,9 @@ let wmb = [W] ; fencerel(Wmb) ; [W]
30let mb = ([M] ; fencerel(Mb) ; [M]) | 30let mb = ([M] ; fencerel(Mb) ; [M]) |
31 ([M] ; fencerel(Before-atomic) ; [RMW] ; po? ; [M]) | 31 ([M] ; fencerel(Before-atomic) ; [RMW] ; po? ; [M]) |
32 ([M] ; po? ; [RMW] ; fencerel(After-atomic) ; [M]) | 32 ([M] ; po? ; [RMW] ; fencerel(After-atomic) ; [M]) |
33 ([M] ; po? ; [LKW] ; fencerel(After-spinlock) ; [M]) 33 ([M] ; po? ; [LKW] ; fencerel(After-spinlock) ; [M]) |
34 ([M] ; po ; [UL] ; (co | po) ; [LKW] ;
35 fencerel(After-unlock-lock) ; [M])
34let gp = po ; [Sync-rcu] ; po? 36let gp = po ; [Sync-rcu] ; po?
35 37
36let strong-fence = mb | gp 38let strong-fence = mb | gp
diff --git a/tools/memory-model/linux-kernel.def b/tools/memory-model/linux-kernel.def
index 6fa3eb28d40b..b27911cc087d 100644
--- a/tools/memory-model/linux-kernel.def
+++ b/tools/memory-model/linux-kernel.def
@@ -23,6 +23,7 @@ smp_wmb() { __fence{wmb}; }
23smp_mb__before_atomic() { __fence{before-atomic}; } 23smp_mb__before_atomic() { __fence{before-atomic}; }
24smp_mb__after_atomic() { __fence{after-atomic}; } 24smp_mb__after_atomic() { __fence{after-atomic}; }
25smp_mb__after_spinlock() { __fence{after-spinlock}; } 25smp_mb__after_spinlock() { __fence{after-spinlock}; }
26smp_mb__after_unlock_lock() { __fence{after-unlock-lock}; }
26 27
27// Exchange 28// Exchange
28xchg(X,V) __xchg{mb}(X,V) 29xchg(X,V) __xchg{mb}(X,V)
diff --git a/tools/memory-model/scripts/README b/tools/memory-model/scripts/README
new file mode 100644
index 000000000000..29375a1fbbfa
--- /dev/null
+++ b/tools/memory-model/scripts/README
@@ -0,0 +1,70 @@
1 ============
2 LKMM SCRIPTS
3 ============
4
5
6These scripts are run from the tools/memory-model directory.
7
8checkalllitmus.sh
9
10 Run all litmus tests in the litmus-tests directory, checking
11 the results against the expected results recorded in the
12 "Result:" comment lines.
13
14checkghlitmus.sh
15
16 Run all litmus tests in the https://github.com/paulmckrcu/litmus
17 archive that are C-language and that have "Result:" comment lines
18 documenting expected results, comparing the actual results to
19 those expected.
20
21checklitmushist.sh
22
23 Run all litmus tests having .litmus.out files from previous
24 initlitmushist.sh or newlitmushist.sh runs, comparing the
25 herd output to that of the original runs.
26
27checklitmus.sh
28
29 Check a single litmus test against its "Result:" expected result.
30
31cmplitmushist.sh
32
33 Compare output from two different runs of the same litmus tests,
34 with the absolute pathnames of the tests to run provided one
35 name per line on standard input. Not normally run manually,
36 provided instead for use by other scripts.
37
38initlitmushist.sh
39
40 Run all litmus tests having no more than the specified number
41 of processes given a specified timeout, recording the results
42 in .litmus.out files.
43
44judgelitmus.sh
45
46 Given a .litmus file and its .litmus.out herd output, check the
47 .litmus.out file against the .litmus file's "Result:" comment to
48 judge whether the test ran correctly. Not normally run manually,
49 provided instead for use by other scripts.
50
51newlitmushist.sh
52
53 For all new or updated litmus tests having no more than the
54 specified number of processes given a specified timeout, run
55 and record the results in .litmus.out files.
56
57parseargs.sh
58
59 Parse command-line arguments. Not normally run manually,
60 provided instead for use by other scripts.
61
62runlitmushist.sh
63
64 Run the litmus tests whose absolute pathnames are provided one
65 name per line on standard input. Not normally run manually,
66 provided instead for use by other scripts.
67
68README
69
70 This file
diff --git a/tools/memory-model/scripts/checkalllitmus.sh b/tools/memory-model/scripts/checkalllitmus.sh
index ca528f9a24d4..b35fcd61ecf6 100755
--- a/tools/memory-model/scripts/checkalllitmus.sh
+++ b/tools/memory-model/scripts/checkalllitmus.sh
@@ -1,42 +1,27 @@
1#!/bin/sh 1#!/bin/sh
2# SPDX-License-Identifier: GPL-2.0+
2# 3#
3# Run herd tests on all .litmus files in the specified directory (which 4# Run herd tests on all .litmus files in the litmus-tests directory
4# defaults to litmus-tests) and check each file's result against a "Result:" 5# and check each file's result against a "Result:" comment within that
5# comment within that litmus test. If the verification result does not 6# litmus test. If the verification result does not match that specified
6# match that specified in the litmus test, this script prints an error 7# in the litmus test, this script prints an error message prefixed with
7# message prefixed with "^^^". It also outputs verification results to 8# "^^^". It also outputs verification results to a file whose name is
8# a file whose name is that of the specified litmus test, but with ".out" 9# that of the specified litmus test, but with ".out" appended.
9# appended.
10# 10#
11# Usage: 11# Usage:
12# checkalllitmus.sh [ directory ] 12# checkalllitmus.sh
13# 13#
14# The LINUX_HERD_OPTIONS environment variable may be used to specify 14# Run this in the directory containing the memory model.
15# arguments to herd, whose default is defined by the checklitmus.sh script.
16# Thus, one would normally run this in the directory containing the memory
17# model, specifying the pathname of the litmus test to check.
18# 15#
19# This script makes no attempt to run the litmus tests concurrently. 16# This script makes no attempt to run the litmus tests concurrently.
20# 17#
21# This program is free software; you can redistribute it and/or modify
22# it under the terms of the GNU General Public License as published by
23# the Free Software Foundation; either version 2 of the License, or
24# (at your option) any later version.
25#
26# This program is distributed in the hope that it will be useful,
27# but WITHOUT ANY WARRANTY; without even the implied warranty of
28# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
29# GNU General Public License for more details.
30#
31# You should have received a copy of the GNU General Public License
32# along with this program; if not, you can access it online at
33# http://www.gnu.org/licenses/gpl-2.0.html.
34#
35# Copyright IBM Corporation, 2018 18# Copyright IBM Corporation, 2018
36# 19#
37# Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com> 20# Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
38 21
39litmusdir=${1-litmus-tests} 22. scripts/parseargs.sh
23
24litmusdir=litmus-tests
40if test -d "$litmusdir" -a -r "$litmusdir" -a -x "$litmusdir" 25if test -d "$litmusdir" -a -r "$litmusdir" -a -x "$litmusdir"
41then 26then
42 : 27 :
@@ -45,6 +30,14 @@ else
45 exit 255 30 exit 255
46fi 31fi
47 32
33# Create any new directories that have appeared in the github litmus
34# repo since the last run.
35if test "$LKMM_DESTDIR" != "."
36then
37 find $litmusdir -type d -print |
38 ( cd "$LKMM_DESTDIR"; sed -e 's/^/mkdir -p /' | sh )
39fi
40
48# Find the checklitmus script. If it is not where we expect it, then 41# Find the checklitmus script. If it is not where we expect it, then
49# assume that the caller has the PATH environment variable set 42# assume that the caller has the PATH environment variable set
50# appropriately. 43# appropriately.
@@ -57,7 +50,7 @@ fi
57 50
58# Run the script on all the litmus tests in the specified directory 51# Run the script on all the litmus tests in the specified directory
59ret=0 52ret=0
60for i in litmus-tests/*.litmus 53for i in $litmusdir/*.litmus
61do 54do
62 if ! $clscript $i 55 if ! $clscript $i
63 then 56 then
@@ -66,8 +59,8 @@ do
66done 59done
67if test "$ret" -ne 0 60if test "$ret" -ne 0
68then 61then
69 echo " ^^^ VERIFICATION MISMATCHES" 62 echo " ^^^ VERIFICATION MISMATCHES" 1>&2
70else 63else
71 echo All litmus tests verified as was expected. 64 echo All litmus tests verified as was expected. 1>&2
72fi 65fi
73exit $ret 66exit $ret
diff --git a/tools/memory-model/scripts/checkghlitmus.sh b/tools/memory-model/scripts/checkghlitmus.sh
new file mode 100644
index 000000000000..6589fbb6f653
--- /dev/null
+++ b/tools/memory-model/scripts/checkghlitmus.sh
@@ -0,0 +1,65 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0+
#
# Runs the C-language litmus tests from the github litmus repository
# having no more than the specified maximum number of processes
# (per the --procs argument, defaulting to 6), and judges the results
# against each test's "Result:" comment.
#
# sh checkghlitmus.sh
#
# Run from the Linux kernel tools/memory-model directory.  See the
# parseargs.sh script for arguments.

. scripts/parseargs.sh

T=/tmp/checkghlitmus.sh.$$
trap 'rm -rf $T' 0
mkdir $T

# Clone the repository if it is not already present.
if test -d litmus
then
	:
else
	git clone https://github.com/paulmckrcu/litmus
	( cd litmus; git checkout origin/master )
fi

# Create any new directories that have appeared in the github litmus
# repo since the last run.
if test "$LKMM_DESTDIR" != "."
then
	find litmus -type d -print |
		( cd "$LKMM_DESTDIR"; sed -e 's/^/mkdir -p /' | sh )
fi

# Create a list of the C-language litmus tests previously run: those
# whose .litmus.out file records a Result judgment and which have no
# more than the specified number of processes.
# (Uses "grep -E" rather than the deprecated "egrep".)
( cd $LKMM_DESTDIR; find litmus -name '*.litmus.out' -print ) |
	sed -e 's/\.out$//' |
	xargs -r grep -E -l '^ \* Result: (Never|Sometimes|Always|DEADLOCK)' |
	xargs -r grep -L "^P${LKMM_PROCS}" > $T/list-C-already

# Create a list of C-language litmus tests with "Result:" commands and
# no more than the specified number of processes.
find litmus -name '*.litmus' -exec grep -l -m 1 "^C " {} \; > $T/list-C
xargs < $T/list-C -r grep -E -l '^ \* Result: (Never|Sometimes|Always|DEADLOCK)' > $T/list-C-result
xargs < $T/list-C-result -r grep -L "^P${LKMM_PROCS}" > $T/list-C-result-short

# Form list of tests without corresponding .litmus.out files
sort $T/list-C-already $T/list-C-result-short | uniq -u > $T/list-C-needed

# Run any needed tests.
if scripts/runlitmushist.sh < $T/list-C-needed > $T/run.stdout 2> $T/run.stderr
then
	errs=
else
	errs=1
fi

# Judge each freshly run test against its "Result:" comment.
sed < $T/list-C-result-short -e 's,^,scripts/judgelitmus.sh ,' |
	sh > $T/judge.stdout 2> $T/judge.stderr

# Surface herd failures and any verification mismatches.
if test -n "$errs"
then
	cat $T/run.stderr 1>&2
fi
grep '!!!' $T/judge.stdout
diff --git a/tools/memory-model/scripts/checklitmus.sh b/tools/memory-model/scripts/checklitmus.sh
index bf12a75c0719..dd08801a30b0 100755
--- a/tools/memory-model/scripts/checklitmus.sh
+++ b/tools/memory-model/scripts/checklitmus.sh
@@ -1,40 +1,24 @@
1#!/bin/sh 1#!/bin/sh
2# SPDX-License-Identifier: GPL-2.0+
2# 3#
3# Run a herd test and check the result against a "Result:" comment within 4# Run a herd test and invokes judgelitmus.sh to check the result against
4# the litmus test. If the verification result does not match that specified 5# a "Result:" comment within the litmus test. It also outputs verification
5# in the litmus test, this script prints an error message prefixed with
6# "^^^" and exits with a non-zero status. It also outputs verification
7# results to a file whose name is that of the specified litmus test, but 6# results to a file whose name is that of the specified litmus test, but
8# with ".out" appended. 7# with ".out" appended.
9# 8#
10# Usage: 9# Usage:
11# checklitmus.sh file.litmus 10# checklitmus.sh file.litmus
12# 11#
13# The LINUX_HERD_OPTIONS environment variable may be used to specify 12# Run this in the directory containing the memory model, specifying the
14# arguments to herd, which default to "-conf linux-kernel.cfg". Thus, 13# pathname of the litmus test to check. The caller is expected to have
15# one would normally run this in the directory containing the memory model, 14# properly set up the LKMM environment variables.
16# specifying the pathname of the litmus test to check.
17#
18# This program is free software; you can redistribute it and/or modify
19# it under the terms of the GNU General Public License as published by
20# the Free Software Foundation; either version 2 of the License, or
21# (at your option) any later version.
22#
23# This program is distributed in the hope that it will be useful,
24# but WITHOUT ANY WARRANTY; without even the implied warranty of
25# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
26# GNU General Public License for more details.
27#
28# You should have received a copy of the GNU General Public License
29# along with this program; if not, you can access it online at
30# http://www.gnu.org/licenses/gpl-2.0.html.
31# 15#
32# Copyright IBM Corporation, 2018 16# Copyright IBM Corporation, 2018
33# 17#
34# Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com> 18# Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
35 19
36litmus=$1 20litmus=$1
37herdoptions=${LINUX_HERD_OPTIONS--conf linux-kernel.cfg} 21herdoptions=${LKMM_HERD_OPTIONS--conf linux-kernel.cfg}
38 22
39if test -f "$litmus" -a -r "$litmus" 23if test -f "$litmus" -a -r "$litmus"
40then 24then
@@ -43,44 +27,8 @@ else
43 echo ' --- ' error: \"$litmus\" is not a readable file 27 echo ' --- ' error: \"$litmus\" is not a readable file
44 exit 255 28 exit 255
45fi 29fi
46if grep -q '^ \* Result: ' $litmus
47then
48 outcome=`grep -m 1 '^ \* Result: ' $litmus | awk '{ print $3 }'`
49else
50 outcome=specified
51fi
52 30
53echo Herd options: $herdoptions > $litmus.out 31echo Herd options: $herdoptions > $LKMM_DESTDIR/$litmus.out
54/usr/bin/time herd7 -o ~/tmp $herdoptions $litmus >> $litmus.out 2>&1 32/usr/bin/time $LKMM_TIMEOUT_CMD herd7 $herdoptions $litmus >> $LKMM_DESTDIR/$litmus.out 2>&1
55grep "Herd options:" $litmus.out 33
56grep '^Observation' $litmus.out 34scripts/judgelitmus.sh $litmus
57if grep -q '^Observation' $litmus.out
58then
59 :
60else
61 cat $litmus.out
62 echo ' ^^^ Verification error'
63 echo ' ^^^ Verification error' >> $litmus.out 2>&1
64 exit 255
65fi
66if test "$outcome" = DEADLOCK
67then
68 echo grep 3 and 4
69 if grep '^Observation' $litmus.out | grep -q 'Never 0 0$'
70 then
71 ret=0
72 else
73 echo " ^^^ Unexpected non-$outcome verification"
74 echo " ^^^ Unexpected non-$outcome verification" >> $litmus.out 2>&1
75 ret=1
76 fi
77elif grep '^Observation' $litmus.out | grep -q $outcome || test "$outcome" = Maybe
78then
79 ret=0
80else
81 echo " ^^^ Unexpected non-$outcome verification"
82 echo " ^^^ Unexpected non-$outcome verification" >> $litmus.out 2>&1
83 ret=1
84fi
85tail -2 $litmus.out | head -1
86exit $ret
diff --git a/tools/memory-model/scripts/checklitmushist.sh b/tools/memory-model/scripts/checklitmushist.sh
new file mode 100644
index 000000000000..1d210ffb7c8a
--- /dev/null
+++ b/tools/memory-model/scripts/checklitmushist.sh
@@ -0,0 +1,60 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0+
#
# Reruns the C-language litmus tests previously run that match the
# specified criteria, and compares the result to that of the previous
# runs from initlitmushist.sh and/or newlitmushist.sh.
#
# sh checklitmushist.sh
#
# Run from the Linux kernel tools/memory-model directory.
# See scripts/parseargs.sh for list of arguments.
#
# Copyright IBM Corporation, 2018
#
# Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>

. scripts/parseargs.sh

T=/tmp/checklitmushist.sh.$$
trap 'rm -rf $T' 0
mkdir $T

# The litmus repository must already have been cloned by initlitmushist.sh.
if ! test -d litmus
then
	echo Run scripts/initlitmushist.sh first, need litmus repo.
	exit 1
fi

# Create the results directory and populate it with subdirectories.
# The initial output is created here to avoid clobbering the output
# generated earlier.
mkdir $T/results
find litmus -type d -print | ( cd $T/results; sed -e 's/^/mkdir -p /' | sh )

# Create the list of litmus tests already run, then remove those that
# are excluded by this run's --procs argument.
( cd $LKMM_DESTDIR; find litmus -name '*.litmus.out' -print ) |
	sed -e 's/\.out$//' |
	xargs -r grep -L "^P${LKMM_PROCS}" > $T/list-C-already
xargs < $T/list-C-already -r grep -L "^P${LKMM_PROCS}" > $T/list-C-short

# Redirect output, run tests, then restore destination directory.
destdir="$LKMM_DESTDIR"
LKMM_DESTDIR=$T/results; export LKMM_DESTDIR
scripts/runlitmushist.sh < $T/list-C-short > $T/runlitmushist.sh.out 2>&1
LKMM_DESTDIR="$destdir"; export LKMM_DESTDIR

# Move the newly generated .litmus.out files to .litmus.out.new files
# in the destination directory.  The awk absolutizes a relative
# destination path against the current directory.
cdir=`pwd`
ddir=`awk -v c="$cdir" -v d="$LKMM_DESTDIR" \
	'END { if (d ~ /^\//) print d; else print c "/" d; }' < /dev/null`
( cd $T/results; find litmus -type f -name '*.litmus.out' -print |
	sed -e 's,^.*$,cp & '"$ddir"'/&.new,' | sh )

# Compare each pair of old and new output files.
sed < $T/list-C-short -e 's,^,'"$LKMM_DESTDIR/"',' |
	sh scripts/cmplitmushist.sh
exit $?
diff --git a/tools/memory-model/scripts/cmplitmushist.sh b/tools/memory-model/scripts/cmplitmushist.sh
new file mode 100644
index 000000000000..0f498aeeccf5
--- /dev/null
+++ b/tools/memory-model/scripts/cmplitmushist.sh
@@ -0,0 +1,87 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0+
#
# Compares .out and .out.new files for each name on standard input,
# one full pathname per line.  Outputs comparison results followed by
# a summary.
#
# sh cmplitmushist.sh

T=/tmp/cmplitmushist.sh.$$
trap 'rm -rf $T' 0
mkdir $T

# Counters for the summary printed at the end.
perfect=0	# Byte-for-byte identical output (timing lines ignored).
obsline=0	# Observation line matches, including the counts.
noobsline=0	# An Observation line is missing (e.g., herd7 timeout).
obsresult=0	# Only the Always/Sometimes/Never result matches.
badcompare=0	# Results differ outright.

# comparetest oldpath newpath
comparetest () {
	# Strip /usr/bin/time statistics, which vary from run to run.
	grep -v 'maxresident)k\|minor)pagefaults\|^Time' $1 > $T/oldout
	grep -v 'maxresident)k\|minor)pagefaults\|^Time' $2 > $T/newout
	if cmp -s $T/oldout $T/newout && grep -q '^Observation' $1
	then
		echo Exact output match: $2
		perfect=$((perfect + 1))
		return 0
	fi

	# Full output differs; do the Observation lines match exactly?
	grep '^Observation' $1 > $T/oldout
	grep '^Observation' $2 > $T/newout
	if test -s $T/oldout -o -s $T/newout
	then
		if cmp -s $T/oldout $T/newout
		then
			echo Matching Observation result and counts: $2
			obsline=$((obsline + 1))
			return 0
		fi
	else
		echo Missing Observation line "(e.g., herd7 timeout)": $2
		noobsline=$((noobsline + 1))
		return 0
	fi

	# Counts differ; does at least the overall result match?
	grep '^Observation' $1 | awk '{ print $3 }' > $T/oldout
	grep '^Observation' $2 | awk '{ print $3 }' > $T/newout
	if cmp -s $T/oldout $T/newout
	then
		echo Matching Observation Always/Sometimes/Never result: $2
		obsresult=$((obsresult + 1))
		return 0
	fi
	echo ' !!!' Result changed: $2
	badcompare=$((badcompare + 1))
	return 1
}

# Build a script invoking comparetest on each input pathname, source it
# (so the counters update in this shell), and show its output.
sed -e 's/^.*$/comparetest &.out &.out.new/' > $T/cmpscript
. $T/cmpscript > $T/cmpscript.out
cat $T/cmpscript.out

# Emit the summary on stderr, repeating any "!!!" complaints.
echo ' ---' Summary: 1>&2
grep '!!!' $T/cmpscript.out 1>&2
if test "$perfect" -ne 0
then
	echo Exact output matches: $perfect 1>&2
fi
if test "$obsline" -ne 0
then
	echo Matching Observation result and counts: $obsline 1>&2
fi
if test "$noobsline" -ne 0
then
	echo Missing Observation line "(e.g., herd7 timeout)": $noobsline 1>&2
fi
if test "$obsresult" -ne 0
then
	echo Matching Observation Always/Sometimes/Never result: $obsresult 1>&2
fi
if test "$badcompare" -ne 0
then
	echo "!!!" Result changed: $badcompare 1>&2
	exit 1
fi

exit 0
diff --git a/tools/memory-model/scripts/initlitmushist.sh b/tools/memory-model/scripts/initlitmushist.sh
new file mode 100644
index 000000000000..956b6957484d
--- /dev/null
+++ b/tools/memory-model/scripts/initlitmushist.sh
@@ -0,0 +1,68 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0+
#
# Runs the C-language litmus tests matching the specified criteria.
# Generates the output for each .litmus file into a corresponding
# .litmus.out file, and does not judge the result.
#
# sh initlitmushist.sh
#
# Run from the Linux kernel tools/memory-model directory.
# See scripts/parseargs.sh for list of arguments.
#
# This script can consume significant wallclock time and CPU, especially as
# the value of --procs rises.  On a four-core (eight hardware threads)
# 2.5GHz x86 with a one-minute per-run timeout:
#
# --procs wallclock CPU		timeouts	tests
#	1 0m11.241s 0m1.086s	0		19
#	2 1m12.598s 2m8.459s	2		393
#	3 1m30.007s 6m2.479s	4		2291
#	4 3m26.042s 18m5.139s	9		3217
#	5 4m26.661s 23m54.128s	13		3784
#	6 4m41.900s 26m4.721s	13		4352
#	7 5m51.463s 35m50.868s	13		4626
#	8 10m5.235s 68m43.672s	34		5117
#	9 15m57.80s 105m58.101s	69		5156
#	10 16m14.13s 103m35.009s 69		5165
#	20 27m48.55s 198m3.286s	156		5269
#
# Increasing the timeout on the 20-process run to five minutes increases
# the runtime to about 90 minutes with the CPU time rising to about
# 10 hours.  On the other hand, it decreases the number of timeouts to 101.
#
# Note that there are historical tests for which herd7 will fail
# completely, for example, litmus/manual/atomic/C-unlock-wait-00.litmus
# contains a call to spin_unlock_wait(), which no longer exists in either
# the kernel or LKMM.

. scripts/parseargs.sh

T=/tmp/initlitmushist.sh.$$
trap 'rm -rf $T' 0
mkdir $T

# Clone the litmus repository if it is not already present.
if ! test -d litmus
then
	git clone https://github.com/paulmckrcu/litmus
	( cd litmus; git checkout origin/master )
fi

# Create any new directories that have appeared in the github litmus
# repo since the last run.
if test "$LKMM_DESTDIR" != "."
then
	find litmus -type d -print |
		( cd "$LKMM_DESTDIR"; sed -e 's/^/mkdir -p /' | sh )
fi

# Create a list of the C-language litmus tests with no more than the
# specified number of processes (per the --procs argument).
find litmus -name '*.litmus' -exec grep -l -m 1 "^C " {} \; > $T/list-C
xargs < $T/list-C -r grep -L "^P${LKMM_PROCS}" > $T/list-C-short

scripts/runlitmushist.sh < $T/list-C-short

exit 0
diff --git a/tools/memory-model/scripts/judgelitmus.sh b/tools/memory-model/scripts/judgelitmus.sh
new file mode 100644
index 000000000000..0cc63875e395
--- /dev/null
+++ b/tools/memory-model/scripts/judgelitmus.sh
@@ -0,0 +1,78 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0+
#
# Given a .litmus test and the corresponding .litmus.out file, check
# the .litmus.out file against the "Result:" comment to judge whether
# the test ran correctly.
#
# Usage:
#	judgelitmus.sh file.litmus
#
# Run this in the directory containing the memory model, specifying the
# pathname of the litmus test to check.
#
# Copyright IBM Corporation, 2018
#
# Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>

litmus=$1

# Both the litmus test and its herd7 output must be readable files.
if test ! -f "$litmus" -o ! -r "$litmus"
then
	echo ' --- ' error: \"$litmus\" is not a readable file
	exit 255
fi
if test ! -f "$LKMM_DESTDIR/$litmus".out -o ! -r "$LKMM_DESTDIR/$litmus".out
then
	echo ' --- ' error: \"$LKMM_DESTDIR/$litmus\".out is not a readable file
	exit 255
fi

# Extract the expected outcome, defaulting to "specified" when the
# litmus test carries no "Result:" comment.
outcome=specified
if grep -q '^ \* Result: ' $litmus
then
	outcome=`grep -m 1 '^ \* Result: ' $litmus | awk '{ print $3 }'`
fi

# flagerror message: report a "!!!" complaint on stdout and record it
# in the .litmus.out file, but only once per file.
flagerror () {
	echo "$1" $litmus
	if ! grep -q '!!!' $LKMM_DESTDIR/$litmus.out
	then
		echo "$1" >> $LKMM_DESTDIR/$litmus.out 2>&1
	fi
}

# The herd7 output must contain an Observation line.
grep '^Observation' $LKMM_DESTDIR/$litmus.out
if ! grep -q '^Observation' $LKMM_DESTDIR/$litmus.out
then
	flagerror ' !!! Verification error'
	exit 255
fi

# Compare the Observation against the expected outcome.
ret=1
if test "$outcome" = DEADLOCK
then
	# A deadlocked test must have no executions at all.
	if grep '^Observation' $LKMM_DESTDIR/$litmus.out | grep -q 'Never 0 0$'
	then
		ret=0
	else
		flagerror " !!! Unexpected non-$outcome verification"
	fi
elif grep '^Observation' $LKMM_DESTDIR/$litmus.out | grep -q $outcome || test "$outcome" = Maybe
then
	ret=0
else
	flagerror " !!! Unexpected non-$outcome verification"
fi
tail -2 $LKMM_DESTDIR/$litmus.out | head -1
exit $ret
diff --git a/tools/memory-model/scripts/newlitmushist.sh b/tools/memory-model/scripts/newlitmushist.sh
new file mode 100644
index 000000000000..991f8f814881
--- /dev/null
+++ b/tools/memory-model/scripts/newlitmushist.sh
@@ -0,0 +1,61 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0+
#
# Runs the C-language litmus tests matching the specified criteria
# that do not already have a corresponding .litmus.out file, and does
# not judge the result.
#
# sh newlitmushist.sh
#
# Run from the Linux kernel tools/memory-model directory.
# See scripts/parseargs.sh for list of arguments.
#
# Copyright IBM Corporation, 2018
#
# Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>

. scripts/parseargs.sh

T=/tmp/newlitmushist.sh.$$
trap 'rm -rf $T' 0
mkdir $T

# The litmus repository must already have been cloned by initlitmushist.sh.
if ! test -d litmus
then
	echo Run scripts/initlitmushist.sh first, need litmus repo.
	exit 1
fi

# Create any new directories that have appeared in the github litmus
# repo since the last run.
if test "$LKMM_DESTDIR" != "."
then
	find litmus -type d -print |
		( cd "$LKMM_DESTDIR"; sed -e 's/^/mkdir -p /' | sh )
fi

# Create a list of the C-language litmus tests previously run.
( cd $LKMM_DESTDIR; find litmus -name '*.litmus.out' -print ) |
	sed -e 's/\.out$//' |
	xargs -r grep -L "^P${LKMM_PROCS}" > $T/list-C-already

# Form full list of litmus tests with no more than the specified
# number of processes (per the --procs argument).
find litmus -name '*.litmus' -exec grep -l -m 1 "^C " {} \; > $T/list-C-all
xargs < $T/list-C-all -r grep -L "^P${LKMM_PROCS}" > $T/list-C-short

# Form list of new tests.  Note: This does not handle litmus-test deletion!
sort $T/list-C-already $T/list-C-short | uniq -u > $T/list-C-new

# Form list of litmus tests that have changed since the last run.
sed < $T/list-C-short -e 's,^.*$,if test & -nt '"$LKMM_DESTDIR"'/&.out; then echo &; fi,' > $T/list-C-script
sh $T/list-C-script > $T/list-C-newer

# Merge the list of new and of updated litmus tests: These must be (re)run.
sort -u $T/list-C-new $T/list-C-newer > $T/list-C-needed

scripts/runlitmushist.sh < $T/list-C-needed

exit 0
diff --git a/tools/memory-model/scripts/parseargs.sh b/tools/memory-model/scripts/parseargs.sh
new file mode 100644
index 000000000000..859e1d581e05
--- /dev/null
+++ b/tools/memory-model/scripts/parseargs.sh
@@ -0,0 +1,136 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0+
#
# Parse arguments common to the litmus-test scripts, exporting the
# results in LKMM_* environment variables (and LKMM_*_DEF defaults
# for the usage message).
#
# . scripts/parseargs.sh
#
# Include into other Linux kernel tools/memory-model scripts.
#
# Copyright IBM Corporation, 2018
#
# Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>

# No trap here: this file is sourced, so a trap would clobber the
# including script's trap.  $T is removed explicitly at the end.
T=/tmp/parseargs.sh.$$
mkdir $T

# Initialize one parameter: initparam name default
#
# Set and export environment variable "name" to "default" unless it
# already has a value, and record the default in name_DEF for the
# usage message.
initparam () {
	echo if test -z '"$'$1'"' > $T/s
	echo then >> $T/s
	echo $1='"'$2'"' >> $T/s
	echo export $1 >> $T/s
	echo fi >> $T/s
	echo $1_DEF='$'$1 >> $T/s
	. $T/s
}

initparam LKMM_DESTDIR "."
initparam LKMM_HERD_OPTIONS "-conf linux-kernel.cfg"
initparam LKMM_JOBS `getconf _NPROCESSORS_ONLN`
initparam LKMM_PROCS "3"
initparam LKMM_TIMEOUT "1m"

scriptname=$0

usagehelp () {
	echo "Usage $scriptname [ arguments ]"
	echo "	--destdir path (place for .litmus.out, default by .litmus)"
	echo "	--herdopts -conf linux-kernel.cfg ..."
	echo "	--jobs N (number of jobs, default one per CPU)"
	echo "	--procs N (litmus tests with at most this many processes)"
	echo "	--timeout N (herd7 timeout (e.g., 10s, 1m, 2hr, 1d, '')"
	echo "Defaults: --destdir '$LKMM_DESTDIR_DEF' --herdopts '$LKMM_HERD_OPTIONS_DEF' --jobs '$LKMM_JOBS_DEF' --procs '$LKMM_PROCS_DEF' --timeout '$LKMM_TIMEOUT_DEF'"
	exit 1
}

usage () {
	usagehelp 1>&2
}

# checkarg --argname argtype $# arg mustmatch cannotmatch
#
# Complain and exit if the argument ($4) is missing, fails to match
# the "mustmatch" regexp ($5), or matches the "cannotmatch" regexp ($6).
checkarg () {
	if test $3 -le 1
	then
		echo $1 needs argument $2 matching \"$5\"
		usage
	fi
	if echo "$4" | grep -q -e "$5"
	then
		:
	else
		echo $1 $2 \"$4\" must match \"$5\"
		usage
	fi
	if echo "$4" | grep -q -e "$6"
	then
		echo $1 $2 \"$4\" must not match \"$6\"
		usage
	fi
}

while test $# -gt 0
do
	case "$1" in
	--destdir)
		checkarg --destdir "(path to directory)" "$#" "$2" '.\+' '^--'
		LKMM_DESTDIR="$2"
		mkdir $LKMM_DESTDIR > /dev/null 2>&1
		if ! test -e "$LKMM_DESTDIR"
		then
			echo "Cannot create directory --destdir '$LKMM_DESTDIR'"
			usage
		fi
		if test -d "$LKMM_DESTDIR" -a -w "$LKMM_DESTDIR" -a -x "$LKMM_DESTDIR"
		then
			:
		else
			echo "Directory --destdir '$LKMM_DESTDIR' insufficient permissions to create files"
			usage
		fi
		shift
		;;
	--herdopts|--herdopt)
		# Bug fix: error messages previously named --destdir here.
		checkarg --herdopts "(herd options)" "$#" "$2" '.*' '^--'
		LKMM_HERD_OPTIONS="$2"
		shift
		;;
	-j[1-9]*)
		njobs="`echo $1 | sed -e 's/^-j//'`"
		trailchars="`echo $njobs | sed -e 's/[0-9]\+\(.*\)$/\1/'`"
		if test -n "$trailchars"
		then
			echo $1 trailing characters "'$trailchars'"
			usagehelp
		fi
		LKMM_JOBS="`echo $njobs | sed -e 's/^\([0-9]\+\).*$/\1/'`"
		;;
	--jobs|--job|-j)
		# Bug fix: the old BRE '^[1-9][0-9]\+$' required at least
		# two digits, rejecting job counts such as "--jobs 4".
		checkarg --jobs "(number)" "$#" "$2" '^[1-9][0-9]*$' '^--'
		LKMM_JOBS="$2"
		shift
		;;
	--procs|--proc)
		checkarg --procs "(number)" "$#" "$2" '^[0-9]\+$' '^--'
		LKMM_PROCS="$2"
		shift
		;;
	--timeout)
		checkarg --timeout "(timeout spec)" "$#" "$2" '^\([0-9]\+[smhd]\?\|\)$' '^--'
		LKMM_TIMEOUT="$2"
		shift
		;;
	*)
		echo Unknown argument $1
		usage
		;;
	esac
	shift
done
# Expand the timeout into a timeout(1) command prefix, empty when no
# timeout was requested.
if test -z "$LKMM_TIMEOUT"
then
	LKMM_TIMEOUT_CMD=""; export LKMM_TIMEOUT_CMD
else
	LKMM_TIMEOUT_CMD="timeout $LKMM_TIMEOUT"; export LKMM_TIMEOUT_CMD
fi
rm -rf $T
diff --git a/tools/memory-model/scripts/runlitmushist.sh b/tools/memory-model/scripts/runlitmushist.sh
new file mode 100644
index 000000000000..e507f5f933d5
--- /dev/null
+++ b/tools/memory-model/scripts/runlitmushist.sh
@@ -0,0 +1,87 @@
1#!/bin/bash
2# SPDX-License-Identifier: GPL-2.0+
3#
4# Runs the C-language litmus tests specified on standard input, using up
5# to the specified number of CPUs (defaulting to all of them) and placing
6# the results in the specified directory (defaulting to the same place
7# the litmus test came from).
8#
9# sh runlitmushist.sh
10#
11# Run from the Linux kernel tools/memory-model directory.
12# This script uses environment variables produced by parseargs.sh.
13#
14# Copyright IBM Corporation, 2018
15#
16# Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
17
# Scratch area for the generated per-CPU scripts; removed on exit.
18T=/tmp/runlitmushist.sh.$$
19trap 'rm -rf $T' 0
20mkdir $T
21
# The cloned litmus repository must be present (see initlitmushist.sh).
22if test -d litmus
23then
24	:
25else
26	echo Directory \"litmus\" missing, aborting run.
27	exit 1
28fi
29
30# Prefixes for per-CPU scripts
# Each per-CPU script gets the destination directory, scratch directory,
# and herd options, then a runtest() function that runs herd7 on one
# litmus test under the configured timeout, flagging a missing
# Observation line, a timeout (exit code 124 from timeout(1)), or any
# other herd7 failure with a "!!!" message.
31for ((i=0;i<$LKMM_JOBS;i++))
32do
33	echo dir="$LKMM_DESTDIR" > $T/$i.sh
34	echo T=$T >> $T/$i.sh
35	echo herdoptions=\"$LKMM_HERD_OPTIONS\" >> $T/$i.sh
36	cat << '___EOF___' >> $T/$i.sh
37	runtest () {
38		echo ' ... ' /usr/bin/time $LKMM_TIMEOUT_CMD herd7 $herdoptions $1 '>' $dir/$1.out '2>&1'
39		if /usr/bin/time $LKMM_TIMEOUT_CMD herd7 $herdoptions $1 > $dir/$1.out 2>&1
40		then
41			if ! grep -q '^Observation ' $dir/$1.out
42			then
43				echo ' !!! Herd failed, no Observation:' $1
44			fi
45		else
46			exitcode=$?
47			if test "$exitcode" -eq 124
48			then
49				exitmsg="timed out"
50			else
51				exitmsg="failed, exit code $exitcode"
52			fi
53			echo ' !!! Herd' ${exitmsg}: $1
54		fi
55	}
56___EOF___
57done
58
# For each litmus-test pathname on stdin, emit a shell command printing
# the test's highest process number (from its last "Pn(" header) followed
# by the pathname; run those commands, sort numerically by process count
# so similarly-sized tests spread evenly, then deal "runtest" invocations
# round-robin into the per-CPU scripts and run all of them in parallel.
# (q is a literal single quote and b a literal backslash, to dodge
# awk-within-shell quoting.)
59awk -v q="'" -v b='\\' '
60{
61	print "echo `grep " q "^P[0-9]" b "+(" q " " $0 " | tail -1 | sed -e " q "s/^P" b "([0-9]" b "+" b ")(.*$/" b "1/" q "` " $0
62}' | bash |
63sort -k1n |
64awk -v ncpu=$LKMM_JOBS -v t=$T '
65{
66	print "runtest " $2 >> t "/" NR % ncpu ".sh";
67}
68
69END {
70	for (i = 0; i < ncpu; i++) {
71		print "sh " t "/" i ".sh > " t "/" i ".sh.out 2>&1 &";
72		close(t "/" i ".sh");
73	}
74	print "wait";
75}' | sh
# Collect the per-CPU output and summarize any "!!!" failures on stderr.
76cat $T/*.sh.out
77if grep -q '!!!' $T/*.sh.out
78then
79	echo ' ---' Summary: 1>&2
80	grep '!!!' $T/*.sh.out 1>&2
81	nfail="`grep '!!!' $T/*.sh.out | wc -l`"
82	echo 'Number of failed herd runs (e.g., timeout): ' $nfail 1>&2
83	exit 1
84else
85	echo All runs completed successfully. 1>&2
86	exit 0
87fi
diff --git a/tools/testing/selftests/rcutorture/bin/mkinitrd.sh b/tools/testing/selftests/rcutorture/bin/mkinitrd.sh
index da298394daa2..83552bb007b4 100755
--- a/tools/testing/selftests/rcutorture/bin/mkinitrd.sh
+++ b/tools/testing/selftests/rcutorture/bin/mkinitrd.sh
@@ -40,17 +40,24 @@ mkdir $T
40cat > $T/init << '__EOF___' 40cat > $T/init << '__EOF___'
41#!/bin/sh 41#!/bin/sh
42# Run in userspace a few milliseconds every second. This helps to 42# Run in userspace a few milliseconds every second. This helps to
43# exercise the NO_HZ_FULL portions of RCU. 43# exercise the NO_HZ_FULL portions of RCU. The 192 instances of "a" was
44# empirically shown to give a nice multi-millisecond burst of user-mode
45# execution on a 2GHz CPU, as desired. Modern CPUs will vary from a
46# couple of milliseconds up to perhaps 100 milliseconds, which is an
47# acceptable range.
48#
49# Why not calibrate an exact delay? Because within this initrd, we
50# are restricted to Bourne-shell builtins, which as far as I know do not
51# provide any means of obtaining a fine-grained timestamp.
52
53a4="a a a a"
54a16="$a4 $a4 $a4 $a4"
55a64="$a16 $a16 $a16 $a16"
56a192="$a64 $a64 $a64"
44while : 57while :
45do 58do
46 q= 59 q=
47 for i in \ 60 for i in $a192
48 a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a \
49 a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a \
50 a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a \
51 a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a \
52 a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a \
53 a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
54 do 61 do
55 q="$q $i" 62 q="$q $i"
56 done 63 done
@@ -124,8 +131,8 @@ if echo -e "#if __x86_64__||__i386__||__i486__||__i586__||__i686__" \
124 | grep -q '^yes'; then 131 | grep -q '^yes'; then
125 # architecture supported by nolibc 132 # architecture supported by nolibc
126 ${CROSS_COMPILE}gcc -fno-asynchronous-unwind-tables -fno-ident \ 133 ${CROSS_COMPILE}gcc -fno-asynchronous-unwind-tables -fno-ident \
127 -nostdlib -include ../bin/nolibc.h -lgcc -s -static -Os \ 134 -nostdlib -include ../../../../include/nolibc/nolibc.h \
128 -o init init.c 135 -lgcc -s -static -Os -o init init.c
129else 136else
130 ${CROSS_COMPILE}gcc -s -static -Os -o init init.c 137 ${CROSS_COMPILE}gcc -s -static -Os -o init init.c
131fi 138fi
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 585845203db8..38df17b7760e 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -4084,7 +4084,7 @@ static int kvm_suspend(void)
4084static void kvm_resume(void) 4084static void kvm_resume(void)
4085{ 4085{
4086 if (kvm_usage_count) { 4086 if (kvm_usage_count) {
4087 WARN_ON(raw_spin_is_locked(&kvm_count_lock)); 4087 lockdep_assert_held(&kvm_count_lock);
4088 hardware_enable_nolock(NULL); 4088 hardware_enable_nolock(NULL);
4089 } 4089 }
4090} 4090}