author		Ingo Molnar <mingo@kernel.org>	2014-07-16 07:49:15 -0400
committer	Ingo Molnar <mingo@kernel.org>	2014-07-17 05:34:01 -0400
commit		01c9db827146ce321562a992a5dbc1a49b1a99ce (patch)
tree		2014c68991343e5b7ddf0408c2527ff08038e08d
parent		1795cd9b3a91d4b5473c97f491d63892442212ab (diff)
parent		187497fa5e9e9383820d33e48b87f8200a747c2a (diff)
Merge branch 'rcu/next' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu into core/rcu
Pull RCU updates from Paul E. McKenney:

 * Update RCU documentation.
 * Miscellaneous fixes.
 * Maintainership changes.
 * Torture-test updates.
 * Callback-offloading changes.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--	Documentation/RCU/RTFP.txt | 4
-rw-r--r--	Documentation/RCU/rcuref.txt | 9
-rw-r--r--	Documentation/kernel-parameters.txt | 13
-rw-r--r--	Documentation/memory-barriers.txt | 27
-rw-r--r--	MAINTAINERS | 18
-rw-r--r--	include/linux/init_task.h | 9
-rw-r--r--	include/linux/rcupdate.h | 91
-rw-r--r--	include/linux/sched.h | 6
-rw-r--r--	include/linux/tick.h | 20
-rw-r--r--	init/Kconfig | 6
-rw-r--r--	kernel/rcu/rcu.h | 8
-rw-r--r--	kernel/rcu/srcu.c | 4
-rw-r--r--	kernel/rcu/tree.c | 199
-rw-r--r--	kernel/rcu/tree.h | 42
-rw-r--r--	kernel/rcu/tree_plugin.h | 304
-rw-r--r--	kernel/rcu/update.c | 25
-rw-r--r--	kernel/sched/core.c | 7
-rw-r--r--	kernel/signal.c | 4
-rw-r--r--	kernel/time/tick-sched.c | 10
-rw-r--r--	kernel/torture.c | 2
-rw-r--r--	lib/Kconfig.debug | 14
-rwxr-xr-x	scripts/get_maintainer.pl | 22
-rwxr-xr-x	tools/testing/selftests/rcutorture/bin/kvm-recheck.sh | 8
-rwxr-xr-x	tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh | 9
-rw-r--r--	tools/testing/selftests/rcutorture/bin/kvm.sh | 21
-rw-r--r--	tools/testing/selftests/rcutorture/configs/rcu/TREE01 | 1
-rw-r--r--	tools/testing/selftests/rcutorture/configs/rcu/TREE02 | 1
-rw-r--r--	tools/testing/selftests/rcutorture/configs/rcu/TREE02-T | 1
-rw-r--r--	tools/testing/selftests/rcutorture/configs/rcu/TREE03 | 1
-rw-r--r--	tools/testing/selftests/rcutorture/configs/rcu/TREE04 | 1
-rw-r--r--	tools/testing/selftests/rcutorture/configs/rcu/TREE05 | 1
-rw-r--r--	tools/testing/selftests/rcutorture/configs/rcu/TREE06 | 1
-rw-r--r--	tools/testing/selftests/rcutorture/configs/rcu/TREE07 | 1
-rw-r--r--	tools/testing/selftests/rcutorture/configs/rcu/TREE08 | 1
-rw-r--r--	tools/testing/selftests/rcutorture/configs/rcu/TREE08-T | 1
-rw-r--r--	tools/testing/selftests/rcutorture/configs/rcu/TREE09 | 1
-rw-r--r--	tools/testing/selftests/rcutorture/configs/rcu/v0.0/P5-U-T-NH-sd-SMP-hp | 1
-rw-r--r--	tools/testing/selftests/rcutorture/configs/rcu/v3.12/P5-U-T-NH-sd-SMP-hp | 1
-rw-r--r--	tools/testing/selftests/rcutorture/configs/rcu/v3.3/P5-U-T-NH-sd-SMP-hp | 1
-rw-r--r--	tools/testing/selftests/rcutorture/configs/rcu/v3.5/P5-U-T-NH-sd-SMP-hp | 1
-rw-r--r--	tools/testing/selftests/rcutorture/doc/TREE_RCU-kconfig.txt | 1
41 files changed, 629 insertions(+), 269 deletions(-)
diff --git a/Documentation/RCU/RTFP.txt b/Documentation/RCU/RTFP.txt
index 2f0fcb2112d2..f29bcbc463e7 100644
--- a/Documentation/RCU/RTFP.txt
+++ b/Documentation/RCU/RTFP.txt
@@ -2451,8 +2451,8 @@ lot of {Linux} into your technology!!!"
 ,month="February"
 ,year="2010"
 ,note="Available:
-\url{http://kerneltrap.com/mailarchive/linux-netdev/2010/2/26/6270589}
-[Viewed March 20, 2011]"
+\url{http://thread.gmane.org/gmane.linux.network/153338}
+[Viewed June 9, 2014]"
 ,annotation={
 	Use a pair of list_head structures to support RCU-protected
 	resizable hash tables.
diff --git a/Documentation/RCU/rcuref.txt b/Documentation/RCU/rcuref.txt
index 141d531aa14b..613033ff2b9b 100644
--- a/Documentation/RCU/rcuref.txt
+++ b/Documentation/RCU/rcuref.txt
@@ -1,5 +1,14 @@
 Reference-count design for elements of lists/arrays protected by RCU.
 
+
+Please note that the percpu-ref feature is likely your first
+stop if you need to combine reference counts and RCU.  Please see
+include/linux/percpu-refcount.h for more information.  However, in
+those unusual cases where percpu-ref would consume too much memory,
+please read on.
+
+------------------------------------------------------------------------
+
 Reference counting on elements of lists which are protected by traditional
 reader/writer spinlocks or semaphores are straightforward:
 
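To make the percpu-ref suggestion above concrete, the following is a minimal
sketch of combining a per-CPU reference count with an RCU-protected lookup.
The structure and function names (my_obj, my_obj_get, and so on) are
illustrative only, not part of the patch; the sketch assumes the standard
percpu_ref API from include/linux/percpu-refcount.h.

	#include <linux/percpu-refcount.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	/* Illustrative object whose lookup is RCU-protected and whose
	 * lifetime is managed by a percpu_ref. */
	struct my_obj {
		struct percpu_ref ref;
		struct rcu_head rcu;
		int data;
	};

	static void my_obj_free_rcu(struct rcu_head *rcu)
	{
		kfree(container_of(rcu, struct my_obj, rcu));
	}

	/* Invoked once the last reference is dropped. */
	static void my_obj_release(struct percpu_ref *ref)
	{
		struct my_obj *obj = container_of(ref, struct my_obj, ref);

		call_rcu(&obj->rcu, my_obj_free_rcu);
	}

	/* Look the object up under RCU, then take a reference that can
	 * outlive the read-side critical section. */
	static struct my_obj *my_obj_get(struct my_obj __rcu **slot)
	{
		struct my_obj *obj;

		rcu_read_lock();
		obj = rcu_dereference(*slot);
		if (obj && !percpu_ref_tryget(&obj->ref))
			obj = NULL;
		rcu_read_unlock();
		return obj;
	}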
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index c1b9aa8c5a52..6439c9380198 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2790,6 +2790,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			leaf rcu_node structure.  Useful for very large
 			systems.
 
+	rcutree.jiffies_till_sched_qs= [KNL]
+			Set required age in jiffies for a
+			given grace period before RCU starts
+			soliciting quiescent-state help from
+			rcu_note_context_switch().
+
 	rcutree.jiffies_till_first_fqs= [KNL]
 			Set delay from grace-period initialization to
 			first attempt to force quiescent states.
@@ -2801,6 +2807,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			quiescent states.  Units are jiffies, minimum
 			value is one, and maximum value is HZ.
 
+	rcutree.rcu_nocb_leader_stride= [KNL]
+			Set the number of NOCB kthread groups, which
+			defaults to the square root of the number of
+			CPUs.  Larger numbers reduces the wakeup overhead
+			on the per-CPU grace-period kthreads, but increases
+			that same overhead on each group's leader.
+
 	rcutree.qhimark= [KNL]
 			Set threshold of queued RCU callbacks beyond which
 			batch limiting is disabled.
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index f1dc4a215593..a4de88fb55f0 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -757,10 +757,14 @@ SMP BARRIER PAIRING
 When dealing with CPU-CPU interactions, certain types of memory barrier should
 always be paired.  A lack of appropriate pairing is almost certainly an error.
 
-A write barrier should always be paired with a data dependency barrier or read
-barrier, though a general barrier would also be viable.  Similarly a read
-barrier or a data dependency barrier should always be paired with at least an
-write barrier, though, again, a general barrier is viable:
+General barriers pair with each other, though they also pair with
+most other types of barriers, albeit without transitivity.  An acquire
+barrier pairs with a release barrier, but both may also pair with other
+barriers, including of course general barriers.  A write barrier pairs
+with a data dependency barrier, an acquire barrier, a release barrier,
+a read barrier, or a general barrier.  Similarly a read barrier or a
+data dependency barrier pairs with a write barrier, an acquire barrier,
+a release barrier, or a general barrier:
 
 	CPU 1		      CPU 2
 	===============	      ===============
@@ -1893,6 +1897,21 @@ between the STORE to indicate the event and the STORE to set TASK_RUNNING:
 	<general barrier>		STORE current->state
 	LOAD event_indicated
 
+To repeat, this write memory barrier is present if and only if something
+is actually awakened.  To see this, consider the following sequence of
+events, where X and Y are both initially zero:
+
+	CPU 1				CPU 2
+	===============================	===============================
+	X = 1;				STORE event_indicated
+	smp_mb();			wake_up();
+	Y = 1;				wait_event(wq, Y == 1);
+	wake_up();			  load from Y sees 1, no memory barrier
+					load from X might see 0
+
+In contrast, if a wakeup does occur, CPU 2's load from X would be guaranteed
+to see 1.
+
 The available waker functions include:
 
 	complete();
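The table added above can be restated as a short sketch of the waker/waiter
pattern in kernel C.  This sketch is not from the patch; it simply makes the
conservative point explicit by pairing smp_wmb()/smp_rmb() rather than relying
on the wakeup's implied barrier, which only exists if a sleeper is actually
woken.  Whether the explicit barriers are strictly required depends on the
condition being tested, as memory-barriers.txt explains.

	#include <linux/bug.h>
	#include <linux/sched.h>
	#include <linux/wait.h>

	static DECLARE_WAIT_QUEUE_HEAD(wq);
	static int event_indicated;
	static int data_ready;

	/* Waker: publish the data, then set the flag and wake the sleeper. */
	static void producer(void)
	{
		data_ready = 1;		/* analogous to "X = 1" above */
		smp_wmb();		/* do not rely on wake_up() alone */
		event_indicated = 1;	/* analogous to STORE event_indicated */
		wake_up(&wq);
	}

	/* Waiter: pairs the read side with the producer's smp_wmb(). */
	static void consumer(void)
	{
		wait_event(wq, event_indicated);
		smp_rmb();
		BUG_ON(!data_ready);	/* guaranteed to see the published data */
	}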
diff --git a/MAINTAINERS b/MAINTAINERS
index e31c87474739..6b829e6e726a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -70,6 +70,8 @@ Descriptions of section entries:
 
 	P: Person (obsolete)
 	M: Mail patches to: FullName <address@domain>
+	R: Designated reviewer: FullName <address@domain>
+	   These reviewers should be CCed on patches.
 	L: Mailing list that is relevant to this area
 	W: Web-page with status/info
 	Q: Patchwork web based patch tracking system site
@@ -7426,10 +7428,14 @@ L: linux-kernel@vger.kernel.org
 S:	Supported
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
 F:	Documentation/RCU/torture.txt
-F:	kernel/rcu/torture.c
+F:	kernel/rcu/rcutorture.c
 
 RCUTORTURE TEST FRAMEWORK
 M:	"Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
+M:	Josh Triplett <josh@joshtriplett.org>
+R:	Steven Rostedt <rostedt@goodmis.org>
+R:	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+R:	Lai Jiangshan <laijs@cn.fujitsu.com>
 L:	linux-kernel@vger.kernel.org
 S:	Supported
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
@@ -7452,8 +7458,11 @@ S: Supported
 F:	net/rds/
 
 READ-COPY UPDATE (RCU)
-M:	Dipankar Sarma <dipankar@in.ibm.com>
 M:	"Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
+M:	Josh Triplett <josh@joshtriplett.org>
+R:	Steven Rostedt <rostedt@goodmis.org>
+R:	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+R:	Lai Jiangshan <laijs@cn.fujitsu.com>
 L:	linux-kernel@vger.kernel.org
 W:	http://www.rdrop.com/users/paulmck/RCU/
 S:	Supported
@@ -7463,7 +7472,7 @@ X: Documentation/RCU/torture.txt
 F:	include/linux/rcu*
 X:	include/linux/srcu.h
 F:	kernel/rcu/
-X:	kernel/rcu/torture.c
+X:	kernel/torture.c
 
 REAL TIME CLOCK (RTC) SUBSYSTEM
 M:	Alessandro Zummo <a.zummo@towertech.it>
@@ -8236,6 +8245,9 @@ F: mm/sl?b*
 SLEEPABLE READ-COPY UPDATE (SRCU)
 M:	Lai Jiangshan <laijs@cn.fujitsu.com>
 M:	"Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
+M:	Josh Triplett <josh@joshtriplett.org>
+R:	Steven Rostedt <rostedt@goodmis.org>
+R:	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 L:	linux-kernel@vger.kernel.org
 W:	http://www.rdrop.com/users/paulmck/RCU/
 S:	Supported
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 6df7f9fe0d01..2bb4c4f3531a 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -102,12 +102,6 @@ extern struct group_info init_groups;
 #define INIT_IDS
 #endif
 
-#ifdef CONFIG_RCU_BOOST
-#define INIT_TASK_RCU_BOOST()						\
-	.rcu_boost_mutex = NULL,
-#else
-#define INIT_TASK_RCU_BOOST()
-#endif
 #ifdef CONFIG_TREE_PREEMPT_RCU
 #define INIT_TASK_RCU_TREE_PREEMPT()					\
 	.rcu_blocked_node = NULL,
@@ -119,8 +113,7 @@ extern struct group_info init_groups;
 	.rcu_read_lock_nesting = 0,					\
 	.rcu_read_unlock_special = 0,					\
 	.rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry),		\
-	INIT_TASK_RCU_TREE_PREEMPT()					\
-	INIT_TASK_RCU_BOOST()
+	INIT_TASK_RCU_TREE_PREEMPT()
 #else
 #define INIT_TASK_RCU_PREEMPT(tsk)
 #endif
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 5a75d19aa661..d231aa17b1d7 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -44,7 +44,6 @@
 #include <linux/debugobjects.h>
 #include <linux/bug.h>
 #include <linux/compiler.h>
-#include <linux/percpu.h>
 #include <asm/barrier.h>
 
 extern int rcu_expedited; /* for sysctl */
@@ -300,41 +299,6 @@ bool __rcu_is_watching(void);
 #endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */
 
 /*
- * Hooks for cond_resched() and friends to avoid RCU CPU stall warnings.
- */
-
-#define RCU_COND_RESCHED_LIM 256	/* ms vs. 100s of ms. */
-DECLARE_PER_CPU(int, rcu_cond_resched_count);
-void rcu_resched(void);
-
-/*
- * Is it time to report RCU quiescent states?
- *
- * Note unsynchronized access to rcu_cond_resched_count.  Yes, we might
- * increment some random CPU's count, and possibly also load the result from
- * yet another CPU's count.  We might even clobber some other CPU's attempt
- * to zero its counter.  This is all OK because the goal is not precision,
- * but rather reasonable amortization of rcu_note_context_switch() overhead
- * and extremely high probability of avoiding RCU CPU stall warnings.
- * Note that this function has to be preempted in just the wrong place,
- * many thousands of times in a row, for anything bad to happen.
- */
-static inline bool rcu_should_resched(void)
-{
-	return raw_cpu_inc_return(rcu_cond_resched_count) >=
-	       RCU_COND_RESCHED_LIM;
-}
-
-/*
- * Report quiscent states to RCU if it is time to do so.
- */
-static inline void rcu_cond_resched(void)
-{
-	if (unlikely(rcu_should_resched()))
-		rcu_resched();
-}
-
-/*
  * Infrastructure to implement the synchronize_() primitives in
  * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
  */
@@ -358,9 +322,19 @@ void wait_rcu_gp(call_rcu_func_t crf);
  * initialization.
  */
 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
+void init_rcu_head(struct rcu_head *head);
+void destroy_rcu_head(struct rcu_head *head);
 void init_rcu_head_on_stack(struct rcu_head *head);
 void destroy_rcu_head_on_stack(struct rcu_head *head);
 #else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
+static inline void init_rcu_head(struct rcu_head *head)
+{
+}
+
+static inline void destroy_rcu_head(struct rcu_head *head)
+{
+}
+
 static inline void init_rcu_head_on_stack(struct rcu_head *head)
 {
 }
@@ -852,15 +826,14 @@ static inline void rcu_preempt_sleep_check(void)
  * read-side critical section that would block in a !PREEMPT kernel.
  * But if you want the full story, read on!
  *
- * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU), it
- * is illegal to block while in an RCU read-side critical section.  In
- * preemptible RCU implementations (TREE_PREEMPT_RCU and TINY_PREEMPT_RCU)
- * in CONFIG_PREEMPT kernel builds, RCU read-side critical sections may
- * be preempted, but explicit blocking is illegal.  Finally, in preemptible
- * RCU implementations in real-time (with -rt patchset) kernel builds,
- * RCU read-side critical sections may be preempted and they may also
- * block, but only when acquiring spinlocks that are subject to priority
- * inheritance.
+ * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU),
+ * it is illegal to block while in an RCU read-side critical section.
+ * In preemptible RCU implementations (TREE_PREEMPT_RCU) in CONFIG_PREEMPT
+ * kernel builds, RCU read-side critical sections may be preempted,
+ * but explicit blocking is illegal.  Finally, in preemptible RCU
+ * implementations in real-time (with -rt patchset) kernel builds, RCU
+ * read-side critical sections may be preempted and they may also block, but
+ * only when acquiring spinlocks that are subject to priority inheritance.
  */
 static inline void rcu_read_lock(void)
 {
@@ -884,6 +857,34 @@ static inline void rcu_read_lock(void)
 /**
  * rcu_read_unlock() - marks the end of an RCU read-side critical section.
  *
+ * In most situations, rcu_read_unlock() is immune from deadlock.
+ * However, in kernels built with CONFIG_RCU_BOOST, rcu_read_unlock()
+ * is responsible for deboosting, which it does via rt_mutex_unlock().
+ * Unfortunately, this function acquires the scheduler's runqueue and
+ * priority-inheritance spinlocks.  This means that deadlock could result
+ * if the caller of rcu_read_unlock() already holds one of these locks or
+ * any lock that is ever acquired while holding them.
+ *
+ * That said, RCU readers are never priority boosted unless they were
+ * preempted.  Therefore, one way to avoid deadlock is to make sure
+ * that preemption never happens within any RCU read-side critical
+ * section whose outermost rcu_read_unlock() is called with one of
+ * rt_mutex_unlock()'s locks held.  Such preemption can be avoided in
+ * a number of ways, for example, by invoking preempt_disable() before
+ * critical section's outermost rcu_read_lock().
+ *
+ * Given that the set of locks acquired by rt_mutex_unlock() might change
+ * at any time, a somewhat more future-proofed approach is to make sure
+ * that that preemption never happens within any RCU read-side critical
+ * section whose outermost rcu_read_unlock() is called with irqs disabled.
+ * This approach relies on the fact that rt_mutex_unlock() currently only
+ * acquires irq-disabled locks.
+ *
+ * The second of these two approaches is best in most situations,
+ * however, the first approach can also be useful, at least to those
+ * developers willing to keep abreast of the set of locks acquired by
+ * rt_mutex_unlock().
+ *
  * See rcu_read_lock() for more information.
  */
 static inline void rcu_read_unlock(void)
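The first deadlock-avoidance approach described in the comment above can be
illustrated with a short sketch.  This is not part of the patch, and the lock
name is hypothetical; the point is only that disabling preemption around the
read-side critical section keeps the reader from ever being boosted, so its
outermost rcu_read_unlock() never calls rt_mutex_unlock().

	#include <linux/preempt.h>
	#include <linux/rcupdate.h>
	#include <linux/spinlock.h>

	/* Hypothetical lock of the kind rt_mutex_unlock() might also take. */
	static DEFINE_SPINLOCK(my_irq_lock);

	static void reader_with_scheduler_locks_possible(void)
	{
		unsigned long flags;

		/*
		 * With preemption disabled before rcu_read_lock(), this
		 * reader can never be preempted, hence never boosted, so
		 * the rcu_read_unlock() below cannot deboost and cannot
		 * deadlock on scheduler or priority-inheritance locks.
		 */
		preempt_disable();
		rcu_read_lock();

		spin_lock_irqsave(&my_irq_lock, flags);
		/* ... access RCU-protected data ... */
		spin_unlock_irqrestore(&my_irq_lock, flags);

		rcu_read_unlock();
		preempt_enable();
	}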
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 306f4f0c987a..3cfbc05e66e6 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1270,9 +1270,6 @@ struct task_struct {
 #ifdef CONFIG_TREE_PREEMPT_RCU
 	struct rcu_node *rcu_blocked_node;
 #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
-#ifdef CONFIG_RCU_BOOST
-	struct rt_mutex *rcu_boost_mutex;
-#endif /* #ifdef CONFIG_RCU_BOOST */
 
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 	struct sched_info sched_info;
@@ -2009,9 +2006,6 @@ static inline void rcu_copy_process(struct task_struct *p)
 #ifdef CONFIG_TREE_PREEMPT_RCU
 	p->rcu_blocked_node = NULL;
 #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
-#ifdef CONFIG_RCU_BOOST
-	p->rcu_boost_mutex = NULL;
-#endif /* #ifdef CONFIG_RCU_BOOST */
 	INIT_LIST_HEAD(&p->rcu_node_entry);
 }
 
diff --git a/include/linux/tick.h b/include/linux/tick.h
index b84773cb9f4c..06cc093ab7ad 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -12,6 +12,7 @@
 #include <linux/hrtimer.h>
 #include <linux/context_tracking_state.h>
 #include <linux/cpumask.h>
+#include <linux/sched.h>
 
 #ifdef CONFIG_GENERIC_CLOCKEVENTS
 
@@ -162,6 +163,7 @@ static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; }
 #ifdef CONFIG_NO_HZ_FULL
 extern bool tick_nohz_full_running;
 extern cpumask_var_t tick_nohz_full_mask;
+extern cpumask_var_t housekeeping_mask;
 
 static inline bool tick_nohz_full_enabled(void)
 {
@@ -194,6 +196,24 @@ static inline void tick_nohz_full_kick_all(void) { }
 static inline void __tick_nohz_task_switch(struct task_struct *tsk) { }
 #endif
 
+static inline bool is_housekeeping_cpu(int cpu)
+{
+#ifdef CONFIG_NO_HZ_FULL
+	if (tick_nohz_full_enabled())
+		return cpumask_test_cpu(cpu, housekeeping_mask);
+#endif
+	return true;
+}
+
+static inline void housekeeping_affine(struct task_struct *t)
+{
+#ifdef CONFIG_NO_HZ_FULL
+	if (tick_nohz_full_enabled())
+		set_cpus_allowed_ptr(t, housekeeping_mask);
+
+#endif
+}
+
 static inline void tick_nohz_full_check(void)
 {
 	if (tick_nohz_full_enabled())
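A plausible use of the helpers added above is to confine an in-kernel daemon
thread to the housekeeping CPUs so that it stays off nohz_full CPUs.  The
kthread below is purely illustrative and not part of the patch; only
housekeeping_affine() comes from this series.

	#include <linux/err.h>
	#include <linux/kthread.h>
	#include <linux/sched.h>
	#include <linux/tick.h>

	/* Illustrative worker that should avoid nohz_full CPUs. */
	static int my_housekeeping_worker(void *unused)
	{
		/* Bind this kthread to housekeeping CPUs (no-op without NO_HZ_FULL). */
		housekeeping_affine(current);

		while (!kthread_should_stop()) {
			/* ... periodic maintenance work ... */
			schedule_timeout_interruptible(HZ);
		}
		return 0;
	}

	static struct task_struct *my_worker_task;

	static int __init my_worker_init(void)
	{
		my_worker_task = kthread_run(my_housekeeping_worker, NULL, "my_hk_worker");
		return PTR_ERR_OR_ZERO(my_worker_task);
	}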
diff --git a/init/Kconfig b/init/Kconfig
index 9d76b99af1b9..41066e49e880 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -505,7 +505,7 @@ config PREEMPT_RCU
 	def_bool TREE_PREEMPT_RCU
 	help
 	  This option enables preemptible-RCU code that is common between
-	  the TREE_PREEMPT_RCU and TINY_PREEMPT_RCU implementations.
+	  TREE_PREEMPT_RCU and, in the old days, TINY_PREEMPT_RCU.
 
 config RCU_STALL_COMMON
 	def_bool ( TREE_RCU || TREE_PREEMPT_RCU || RCU_TRACE )
@@ -737,7 +737,7 @@ choice
 
 config RCU_NOCB_CPU_NONE
 	bool "No build_forced no-CBs CPUs"
-	depends on RCU_NOCB_CPU && !NO_HZ_FULL
+	depends on RCU_NOCB_CPU && !NO_HZ_FULL_ALL
 	help
 	  This option does not force any of the CPUs to be no-CBs CPUs.
 	  Only CPUs designated by the rcu_nocbs= boot parameter will be
@@ -751,7 +751,7 @@ config RCU_NOCB_CPU_NONE
 
 config RCU_NOCB_CPU_ZERO
 	bool "CPU 0 is a build_forced no-CBs CPU"
-	depends on RCU_NOCB_CPU && !NO_HZ_FULL
+	depends on RCU_NOCB_CPU && !NO_HZ_FULL_ALL
 	help
 	  This option forces CPU 0 to be a no-CBs CPU, so that its RCU
 	  callbacks are invoked by a per-CPU kthread whose name begins
diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index bfda2726ca45..ff1a6de62f17 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -99,6 +99,10 @@ static inline void debug_rcu_head_unqueue(struct rcu_head *head)
 
 void kfree(const void *);
 
+/*
+ * Reclaim the specified callback, either by invoking it (non-lazy case)
+ * or freeing it directly (lazy case).  Return true if lazy, false otherwise.
+ */
 static inline bool __rcu_reclaim(const char *rn, struct rcu_head *head)
 {
 	unsigned long offset = (unsigned long)head->func;
@@ -108,12 +112,12 @@ static inline bool __rcu_reclaim(const char *rn, struct rcu_head *head)
 		RCU_TRACE(trace_rcu_invoke_kfree_callback(rn, head, offset));
 		kfree((void *)head - offset);
 		rcu_lock_release(&rcu_callback_map);
-		return 1;
+		return true;
 	} else {
 		RCU_TRACE(trace_rcu_invoke_callback(rn, head));
 		head->func(head);
 		rcu_lock_release(&rcu_callback_map);
-		return 0;
+		return false;
 	}
 }
 
diff --git a/kernel/rcu/srcu.c b/kernel/rcu/srcu.c
index c639556f3fa0..e037f3eb2f7b 100644
--- a/kernel/rcu/srcu.c
+++ b/kernel/rcu/srcu.c
@@ -298,9 +298,9 @@ int __srcu_read_lock(struct srcu_struct *sp)
 
 	idx = ACCESS_ONCE(sp->completed) & 0x1;
 	preempt_disable();
-	ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
+	__this_cpu_inc(sp->per_cpu_ref->c[idx]);
 	smp_mb(); /* B */  /* Avoid leaking the critical section. */
-	ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
+	__this_cpu_inc(sp->per_cpu_ref->seq[idx]);
 	preempt_enable();
 	return idx;
 }
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index f1ba77363fbb..1b70cb6fbe3c 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -206,6 +206,70 @@ void rcu_bh_qs(int cpu)
 	rdp->passed_quiesce = 1;
 }
 
+static DEFINE_PER_CPU(int, rcu_sched_qs_mask);
+
+static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
+	.dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
+	.dynticks = ATOMIC_INIT(1),
+#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
+	.dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE,
+	.dynticks_idle = ATOMIC_INIT(1),
+#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
+};
+
+/*
+ * Let the RCU core know that this CPU has gone through the scheduler,
+ * which is a quiescent state.  This is called when the need for a
+ * quiescent state is urgent, so we burn an atomic operation and full
+ * memory barriers to let the RCU core know about it, regardless of what
+ * this CPU might (or might not) do in the near future.
+ *
+ * We inform the RCU core by emulating a zero-duration dyntick-idle
+ * period, which we in turn do by incrementing the ->dynticks counter
+ * by two.
+ */
+static void rcu_momentary_dyntick_idle(void)
+{
+	unsigned long flags;
+	struct rcu_data *rdp;
+	struct rcu_dynticks *rdtp;
+	int resched_mask;
+	struct rcu_state *rsp;
+
+	local_irq_save(flags);
+
+	/*
+	 * Yes, we can lose flag-setting operations.  This is OK, because
+	 * the flag will be set again after some delay.
+	 */
+	resched_mask = raw_cpu_read(rcu_sched_qs_mask);
+	raw_cpu_write(rcu_sched_qs_mask, 0);
+
+	/* Find the flavor that needs a quiescent state. */
+	for_each_rcu_flavor(rsp) {
+		rdp = raw_cpu_ptr(rsp->rda);
+		if (!(resched_mask & rsp->flavor_mask))
+			continue;
+		smp_mb(); /* rcu_sched_qs_mask before cond_resched_completed. */
+		if (ACCESS_ONCE(rdp->mynode->completed) !=
+		    ACCESS_ONCE(rdp->cond_resched_completed))
+			continue;
+
+		/*
+		 * Pretend to be momentarily idle for the quiescent state.
+		 * This allows the grace-period kthread to record the
+		 * quiescent state, with no need for this CPU to do anything
+		 * further.
+		 */
+		rdtp = this_cpu_ptr(&rcu_dynticks);
+		smp_mb__before_atomic(); /* Earlier stuff before QS. */
+		atomic_add(2, &rdtp->dynticks);  /* QS. */
+		smp_mb__after_atomic(); /* Later stuff after QS. */
+		break;
+	}
+	local_irq_restore(flags);
+}
+
 /*
  * Note a context switch.  This is a quiescent state for RCU-sched,
  * and requires special handling for preemptible RCU.
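To see why adding two to ->dynticks is enough, recall that the
force-quiescent-state scan snapshots the counter and later treats either an
even value (the CPU really is in dyntick idle) or an advance of at least two
as evidence that a quiescent state has passed.  The following is a simplified
paraphrase of that check, not code from the patch; the function name is
hypothetical and wraparound handling is glossed over.

	/* Roughly the test applied by the FQS scan to a snapshotted CPU
	 * (see dyntick_save_progress_counter()/rcu_implicit_dynticks_qs()
	 * in kernel/rcu/tree.c for the real thing). */
	static bool cpu_passed_quiescent_state(unsigned int snap, unsigned int curr)
	{
		/*
		 * Even value: the CPU is in dyntick idle, a quiescent state.
		 * Advance of two or more: the CPU either passed through idle
		 * or executed rcu_momentary_dyntick_idle(), which adds two
		 * precisely so this test fires.
		 */
		return (curr & 0x1) == 0 || (curr - snap) >= 2;
	}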
@@ -216,19 +280,12 @@ void rcu_note_context_switch(int cpu)
 	trace_rcu_utilization(TPS("Start context switch"));
 	rcu_sched_qs(cpu);
 	rcu_preempt_note_context_switch(cpu);
+	if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
+		rcu_momentary_dyntick_idle();
 	trace_rcu_utilization(TPS("End context switch"));
 }
 EXPORT_SYMBOL_GPL(rcu_note_context_switch);
 
-static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
-	.dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
-	.dynticks = ATOMIC_INIT(1),
-#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
-	.dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE,
-	.dynticks_idle = ATOMIC_INIT(1),
-#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
-};
-
 static long blimit = 10;	/* Maximum callbacks per rcu_do_batch. */
 static long qhimark = 10000;	/* If this many pending, ignore blimit. */
 static long qlowmark = 100;	/* Once only this many pending, use blimit. */
@@ -243,6 +300,13 @@ static ulong jiffies_till_next_fqs = ULONG_MAX;
 module_param(jiffies_till_first_fqs, ulong, 0644);
 module_param(jiffies_till_next_fqs, ulong, 0644);
 
+/*
+ * How long the grace period must be before we start recruiting
+ * quiescent-state help from rcu_note_context_switch().
+ */
+static ulong jiffies_till_sched_qs = HZ / 20;
+module_param(jiffies_till_sched_qs, ulong, 0644);
+
 static bool rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
 				  struct rcu_data *rdp);
 static void force_qs_rnp(struct rcu_state *rsp,
@@ -853,6 +917,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
 				    bool *isidle, unsigned long *maxj)
 {
 	unsigned int curr;
+	int *rcrmp;
 	unsigned int snap;
 
 	curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
@@ -893,27 +958,43 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
 	}
 
 	/*
-	 * There is a possibility that a CPU in adaptive-ticks state
-	 * might run in the kernel with the scheduling-clock tick disabled
-	 * for an extended time period.  Invoke rcu_kick_nohz_cpu() to
-	 * force the CPU to restart the scheduling-clock tick in this
-	 * CPU is in this state.
-	 */
-	rcu_kick_nohz_cpu(rdp->cpu);
-
-	/*
-	 * Alternatively, the CPU might be running in the kernel
-	 * for an extended period of time without a quiescent state.
-	 * Attempt to force the CPU through the scheduler to gain the
-	 * needed quiescent state, but only if the grace period has gone
-	 * on for an uncommonly long time.  If there are many stuck CPUs,
-	 * we will beat on the first one until it gets unstuck, then move
-	 * to the next.  Only do this for the primary flavor of RCU.
+	 * A CPU running for an extended time within the kernel can
+	 * delay RCU grace periods.  When the CPU is in NO_HZ_FULL mode,
+	 * even context-switching back and forth between a pair of
+	 * in-kernel CPU-bound tasks cannot advance grace periods.
+	 * So if the grace period is old enough, make the CPU pay attention.
+	 * Note that the unsynchronized assignments to the per-CPU
+	 * rcu_sched_qs_mask variable are safe.  Yes, setting of
+	 * bits can be lost, but they will be set again on the next
+	 * force-quiescent-state pass.  So lost bit sets do not result
+	 * in incorrect behavior, merely in a grace period lasting
+	 * a few jiffies longer than it might otherwise.  Because
+	 * there are at most four threads involved, and because the
+	 * updates are only once every few jiffies, the probability of
+	 * lossage (and thus of slight grace-period extension) is
+	 * quite low.
+	 *
+	 * Note that if the jiffies_till_sched_qs boot/sysfs parameter
+	 * is set too high, we override with half of the RCU CPU stall
+	 * warning delay.
 	 */
-	if (rdp->rsp == rcu_state_p &&
-	    ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
-		rdp->rsp->jiffies_resched += 5;
-		resched_cpu(rdp->cpu);
+	rcrmp = &per_cpu(rcu_sched_qs_mask, rdp->cpu);
+	if (ULONG_CMP_GE(jiffies,
+			 rdp->rsp->gp_start + jiffies_till_sched_qs) ||
+	    ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
+		if (!(ACCESS_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) {
+			ACCESS_ONCE(rdp->cond_resched_completed) =
+				ACCESS_ONCE(rdp->mynode->completed);
+			smp_mb(); /* ->cond_resched_completed before *rcrmp. */
+			ACCESS_ONCE(*rcrmp) =
+				ACCESS_ONCE(*rcrmp) + rdp->rsp->flavor_mask;
+			resched_cpu(rdp->cpu);  /* Force CPU into scheduler. */
+			rdp->rsp->jiffies_resched += 5; /* Enable beating. */
+		} else if (ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
+			/* Time to beat on that CPU again! */
+			resched_cpu(rdp->cpu);  /* Force CPU into scheduler. */
+			rdp->rsp->jiffies_resched += 5; /* Re-enable beating. */
+		}
 	}
 
 	return 0;
@@ -932,10 +1013,7 @@ static void record_gp_stall_check_time(struct rcu_state *rsp)
 }
 
 /*
- * Dump stacks of all tasks running on stalled CPUs.  This is a fallback
- * for architectures that do not implement trigger_all_cpu_backtrace().
- * The NMI-triggered stack traces are more accurate because they are
- * printed by the target CPU.
+ * Dump stacks of all tasks running on stalled CPUs.
  */
 static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
 {
@@ -1013,7 +1091,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
 	       (long)rsp->gpnum, (long)rsp->completed, totqlen);
 	if (ndetected == 0)
 		pr_err("INFO: Stall ended before state dump start\n");
-	else if (!trigger_all_cpu_backtrace())
+	else
 		rcu_dump_cpu_stacks(rsp);
 
 	/* Complain about tasks blocking the grace period. */
@@ -1044,8 +1122,7 @@ static void print_cpu_stall(struct rcu_state *rsp)
 	pr_cont(" (t=%lu jiffies g=%ld c=%ld q=%lu)\n",
 		jiffies - rsp->gp_start,
 		(long)rsp->gpnum, (long)rsp->completed, totqlen);
-	if (!trigger_all_cpu_backtrace())
-		dump_stack();
+	rcu_dump_cpu_stacks(rsp);
 
 	raw_spin_lock_irqsave(&rnp->lock, flags);
 	if (ULONG_CMP_GE(jiffies, ACCESS_ONCE(rsp->jiffies_stall)))
@@ -1224,10 +1301,16 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
 	 * believe that a grace period is in progress, then we must wait
 	 * for the one following, which is in "c".  Because our request
 	 * will be noticed at the end of the current grace period, we don't
-	 * need to explicitly start one.
+	 * need to explicitly start one.  We only do the lockless check
+	 * of rnp_root's fields if the current rcu_node structure thinks
+	 * there is no grace period in flight, and because we hold rnp->lock,
+	 * the only possible change is when rnp_root's two fields are
+	 * equal, in which case rnp_root->gpnum might be concurrently
+	 * incremented.  But that is OK, as it will just result in our
+	 * doing some extra useless work.
 	 */
 	if (rnp->gpnum != rnp->completed ||
-	    ACCESS_ONCE(rnp->gpnum) != ACCESS_ONCE(rnp->completed)) {
+	    ACCESS_ONCE(rnp_root->gpnum) != ACCESS_ONCE(rnp_root->completed)) {
 		rnp->need_future_gp[c & 0x1]++;
 		trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleaf"));
 		goto out;
@@ -1564,11 +1647,6 @@ static int rcu_gp_init(struct rcu_state *rsp)
 					    rnp->level, rnp->grplo,
 					    rnp->grphi, rnp->qsmask);
 		raw_spin_unlock_irq(&rnp->lock);
-#ifdef CONFIG_PROVE_RCU_DELAY
-		if ((prandom_u32() % (rcu_num_nodes + 1)) == 0 &&
-		    system_state == SYSTEM_RUNNING)
-			udelay(200);
-#endif /* #ifdef CONFIG_PROVE_RCU_DELAY */
 		cond_resched();
 	}
 
@@ -2266,7 +2344,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 	}
 	smp_mb(); /* List handling before counting for rcu_barrier(). */
 	rdp->qlen_lazy -= count_lazy;
-	ACCESS_ONCE(rdp->qlen) -= count;
+	ACCESS_ONCE(rdp->qlen) = rdp->qlen - count;
 	rdp->n_cbs_invoked += count;
 
 	/* Reinstate batch limit if we have worked down the excess. */
@@ -2404,14 +2482,14 @@ static void force_quiescent_state(struct rcu_state *rsp)
 	struct rcu_node *rnp_old = NULL;
 
 	/* Funnel through hierarchy to reduce memory contention. */
-	rnp = per_cpu_ptr(rsp->rda, raw_smp_processor_id())->mynode;
+	rnp = __this_cpu_read(rsp->rda->mynode);
 	for (; rnp != NULL; rnp = rnp->parent) {
 		ret = (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) ||
 		      !raw_spin_trylock(&rnp->fqslock);
 		if (rnp_old != NULL)
 			raw_spin_unlock(&rnp_old->fqslock);
 		if (ret) {
-			ACCESS_ONCE(rsp->n_force_qs_lh)++;
+			rsp->n_force_qs_lh++;
 			return;
 		}
 		rnp_old = rnp;
@@ -2423,7 +2501,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
 	smp_mb__after_unlock_lock();
 	raw_spin_unlock(&rnp_old->fqslock);
 	if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
-		ACCESS_ONCE(rsp->n_force_qs_lh)++;
+		rsp->n_force_qs_lh++;
 		raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
 		return;  /* Someone beat us to it. */
 	}
@@ -2581,7 +2659,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 	unsigned long flags;
 	struct rcu_data *rdp;
 
-	WARN_ON_ONCE((unsigned long)head & 0x3); /* Misaligned rcu_head! */
+	WARN_ON_ONCE((unsigned long)head & 0x1); /* Misaligned rcu_head! */
 	if (debug_rcu_head_queue(head)) {
 		/* Probable double call_rcu(), so leak the callback. */
 		ACCESS_ONCE(head->func) = rcu_leak_callback;
@@ -2612,7 +2690,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 		local_irq_restore(flags);
 		return;
 	}
-	ACCESS_ONCE(rdp->qlen)++;
+	ACCESS_ONCE(rdp->qlen) = rdp->qlen + 1;
 	if (lazy)
 		rdp->qlen_lazy++;
 	else
@@ -3176,7 +3254,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
 	 * ACCESS_ONCE() to prevent the compiler from speculating
 	 * the increment to precede the early-exit check.
 	 */
-	ACCESS_ONCE(rsp->n_barrier_done)++;
+	ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
 	WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
 	_rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
 	smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
@@ -3226,7 +3304,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
 
 	/* Increment ->n_barrier_done to prevent duplicate work. */
 	smp_mb(); /* Keep increment after above mechanism. */
-	ACCESS_ONCE(rsp->n_barrier_done)++;
+	ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
 	WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
 	_rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
 	smp_mb(); /* Keep increment before caller's subsequent code. */
@@ -3483,14 +3561,17 @@ static void __init rcu_init_levelspread(struct rcu_state *rsp)
 static void __init rcu_init_one(struct rcu_state *rsp,
 		struct rcu_data __percpu *rda)
 {
-	static char *buf[] = { "rcu_node_0",
-			       "rcu_node_1",
-			       "rcu_node_2",
-			       "rcu_node_3" };  /* Match MAX_RCU_LVLS */
-	static char *fqs[] = { "rcu_node_fqs_0",
-			       "rcu_node_fqs_1",
-			       "rcu_node_fqs_2",
-			       "rcu_node_fqs_3" };  /* Match MAX_RCU_LVLS */
+	static const char * const buf[] = {
+		"rcu_node_0",
+		"rcu_node_1",
+		"rcu_node_2",
+		"rcu_node_3" };  /* Match MAX_RCU_LVLS */
+	static const char * const fqs[] = {
+		"rcu_node_fqs_0",
+		"rcu_node_fqs_1",
+		"rcu_node_fqs_2",
+		"rcu_node_fqs_3" };  /* Match MAX_RCU_LVLS */
+	static u8 fl_mask = 0x1;
 	int cpustride = 1;
 	int i;
 	int j;
@@ -3509,6 +3590,8 @@ static void __init rcu_init_one(struct rcu_state *rsp,
 	for (i = 1; i < rcu_num_lvls; i++)
 		rsp->level[i] = rsp->level[i - 1] + rsp->levelcnt[i - 1];
 	rcu_init_levelspread(rsp);
+	rsp->flavor_mask = fl_mask;
+	fl_mask <<= 1;
 
 	/* Initialize the elements themselves, starting from the leaves. */
 
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index bf2c1e669691..71e64c718f75 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -172,6 +172,14 @@ struct rcu_node {
 				/*  queued on this rcu_node structure that */
 				/*  are blocking the current grace period, */
 				/*  there can be no such task. */
+	struct completion boost_completion;
+				/* Used to ensure that the rt_mutex used */
+				/*  to carry out the boosting is fully */
+				/*  released with no future boostee accesses */
+				/*  before that rt_mutex is re-initialized. */
+	struct rt_mutex boost_mtx;
+				/* Used only for the priority-boosting */
+				/*  side effect, not as a lock. */
 	unsigned long boost_time;
 				/* When to start boosting (jiffies). */
 	struct task_struct *boost_kthread_task;
@@ -307,6 +315,9 @@ struct rcu_data {
 	/* 4) reasons this CPU needed to be kicked by force_quiescent_state */
 	unsigned long dynticks_fqs;	/* Kicked due to dynticks idle. */
 	unsigned long offline_fqs;	/* Kicked due to being offline. */
+	unsigned long cond_resched_completed;
+					/* Grace period that needs help */
+					/*  from cond_resched(). */
 
 	/* 5) __rcu_pending() statistics. */
 	unsigned long n_rcu_pending;	/* rcu_pending() calls since boot. */
@@ -331,11 +342,29 @@ struct rcu_data {
 	struct rcu_head **nocb_tail;
 	atomic_long_t nocb_q_count;	/* # CBs waiting for kthread */
 	atomic_long_t nocb_q_count_lazy; /*  (approximate). */
+	struct rcu_head *nocb_follower_head; /* CBs ready to invoke. */
+	struct rcu_head **nocb_follower_tail;
+	atomic_long_t nocb_follower_count; /* # CBs ready to invoke. */
+	atomic_long_t nocb_follower_count_lazy; /*  (approximate). */
 	int nocb_p_count;		/* # CBs being invoked by kthread */
 	int nocb_p_count_lazy;		/*  (approximate). */
 	wait_queue_head_t nocb_wq;	/* For nocb kthreads to sleep on. */
 	struct task_struct *nocb_kthread;
 	bool nocb_defer_wakeup;		/* Defer wakeup of nocb_kthread. */
+
+	/* The following fields are used by the leader, hence own cacheline. */
+	struct rcu_head *nocb_gp_head ____cacheline_internodealigned_in_smp;
+					/* CBs waiting for GP. */
+	struct rcu_head **nocb_gp_tail;
+	long nocb_gp_count;
+	long nocb_gp_count_lazy;
+	bool nocb_leader_wake;		/* Is the nocb leader thread awake? */
+	struct rcu_data *nocb_next_follower;
+					/* Next follower in wakeup chain. */
+
+	/* The following fields are used by the follower, hence new cachline. */
+	struct rcu_data *nocb_leader ____cacheline_internodealigned_in_smp;
+					/* Leader CPU takes GP-end wakeups. */
 #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
 
 	/* 8) RCU CPU stall data. */
@@ -392,6 +421,7 @@ struct rcu_state {
 	struct rcu_node *level[RCU_NUM_LVLS];	/* Hierarchy levels. */
 	u32 levelcnt[MAX_RCU_LVLS + 1];		/* # nodes in each level. */
 	u8 levelspread[RCU_NUM_LVLS];		/* kids/node in each level. */
+	u8 flavor_mask;				/* bit in flavor mask. */
 	struct rcu_data __percpu *rda;		/* pointer of percu rcu_data. */
 	void (*call)(struct rcu_head *head,	/* call_rcu() flavor. */
 		     void (*func)(struct rcu_head *head));
@@ -563,7 +593,7 @@ static bool rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp);
 static void do_nocb_deferred_wakeup(struct rcu_data *rdp);
 static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
 static void rcu_spawn_nocb_kthreads(struct rcu_state *rsp);
-static void rcu_kick_nohz_cpu(int cpu);
+static void __maybe_unused rcu_kick_nohz_cpu(int cpu);
 static bool init_nocb_callback_list(struct rcu_data *rdp);
 static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq);
 static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq);
@@ -583,8 +613,14 @@ static bool rcu_nohz_full_cpu(struct rcu_state *rsp);
 /* Sum up queue lengths for tracing. */
 static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
 {
-	*ql = atomic_long_read(&rdp->nocb_q_count) + rdp->nocb_p_count;
-	*qll = atomic_long_read(&rdp->nocb_q_count_lazy) + rdp->nocb_p_count_lazy;
+	*ql = atomic_long_read(&rdp->nocb_q_count) +
+	      rdp->nocb_p_count +
+	      atomic_long_read(&rdp->nocb_follower_count) +
+	      rdp->nocb_p_count + rdp->nocb_gp_count;
+	*qll = atomic_long_read(&rdp->nocb_q_count_lazy) +
+	       rdp->nocb_p_count_lazy +
+	       atomic_long_read(&rdp->nocb_follower_count_lazy) +
+	       rdp->nocb_p_count_lazy + rdp->nocb_gp_count_lazy;
 }
 #else /* #ifdef CONFIG_RCU_NOCB_CPU */
 static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
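For orientation, the leader/follower fields introduced above work roughly as
sketched below: a group leader waits for grace periods on behalf of its group
and then walks the ->nocb_next_follower chain, waking each follower so it can
invoke its now-ready callbacks.  This is a simplified illustration of the
design, not the code added by this series; see rcu_nocb_kthread() and related
functions in kernel/rcu/tree_plugin.h for the real implementation.

	/* Roughly how a no-CBs leader hands work to its followers (illustrative). */
	static void nocb_leader_pass_callbacks(struct rcu_data *my_rdp)
	{
		struct rcu_data *rdp;

		/* Walk the wakeup chain: the leader itself, then each follower. */
		for (rdp = my_rdp; rdp != NULL; rdp = rdp->nocb_next_follower) {
			if (!rdp->nocb_gp_head)
				continue;	/* No callbacks became ready here. */
			/* Move grace-period-completed callbacks to the follower
			 * list, then wake the follower so it invokes them. */
			if (rdp != my_rdp)
				wake_up(&rdp->nocb_wq);
		}
	}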
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index cbc2c45265e2..00dc411e9676 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -33,6 +33,7 @@
 #define RCU_KTHREAD_PRIO 1
 
 #ifdef CONFIG_RCU_BOOST
+#include "../locking/rtmutex_common.h"
 #define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO
 #else
 #define RCU_BOOST_PRIO RCU_KTHREAD_PRIO
@@ -336,7 +337,7 @@ void rcu_read_unlock_special(struct task_struct *t)
 	unsigned long flags;
 	struct list_head *np;
 #ifdef CONFIG_RCU_BOOST
-	struct rt_mutex *rbmp = NULL;
+	bool drop_boost_mutex = false;
 #endif /* #ifdef CONFIG_RCU_BOOST */
 	struct rcu_node *rnp;
 	int special;
@@ -398,11 +399,8 @@ void rcu_read_unlock_special(struct task_struct *t)
 #ifdef CONFIG_RCU_BOOST
 		if (&t->rcu_node_entry == rnp->boost_tasks)
 			rnp->boost_tasks = np;
-		/* Snapshot/clear ->rcu_boost_mutex with rcu_node lock held. */
-		if (t->rcu_boost_mutex) {
-			rbmp = t->rcu_boost_mutex;
-			t->rcu_boost_mutex = NULL;
-		}
+		/* Snapshot ->boost_mtx ownership with rcu_node lock held. */
+		drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx) == t;
 #endif /* #ifdef CONFIG_RCU_BOOST */
 
 		/*
@@ -427,8 +425,10 @@ void rcu_read_unlock_special(struct task_struct *t)
 
 #ifdef CONFIG_RCU_BOOST
 		/* Unboost if we were boosted. */
-		if (rbmp)
-			rt_mutex_unlock(rbmp);
+		if (drop_boost_mutex) {
+			rt_mutex_unlock(&rnp->boost_mtx);
+			complete(&rnp->boost_completion);
+		}
 #endif /* #ifdef CONFIG_RCU_BOOST */
 
 		/*
@@ -988,6 +988,7 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
 
 /* Because preemptible RCU does not exist, no quieting of tasks. */
 static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
+	__releases(rnp->lock)
 {
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 }
@@ -1149,7 +1150,6 @@ static void rcu_wake_cond(struct task_struct *t, int status)
 static int rcu_boost(struct rcu_node *rnp)
 {
 	unsigned long flags;
-	struct rt_mutex mtx;
 	struct task_struct *t;
 	struct list_head *tb;
 
@@ -1200,11 +1200,15 @@ static int rcu_boost(struct rcu_node *rnp)
1200 * section. 1200 * section.
1201 */ 1201 */
1202 t = container_of(tb, struct task_struct, rcu_node_entry); 1202 t = container_of(tb, struct task_struct, rcu_node_entry);
1203 rt_mutex_init_proxy_locked(&mtx, t); 1203 rt_mutex_init_proxy_locked(&rnp->boost_mtx, t);
1204 t->rcu_boost_mutex = &mtx; 1204 init_completion(&rnp->boost_completion);
1205 raw_spin_unlock_irqrestore(&rnp->lock, flags); 1205 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1206 rt_mutex_lock(&mtx); /* Side effect: boosts task t's priority. */ 1206 /* Lock only for side effect: boosts task t's priority. */
1207 rt_mutex_unlock(&mtx); /* Keep lockdep happy. */ 1207 rt_mutex_lock(&rnp->boost_mtx);
1208 rt_mutex_unlock(&rnp->boost_mtx); /* Then keep lockdep happy. */
1209
1210 /* Wait for boostee to be done w/boost_mtx before reinitializing. */
1211 wait_for_completion(&rnp->boost_completion);
1208 1212
1209 return ACCESS_ONCE(rnp->exp_tasks) != NULL || 1213 return ACCESS_ONCE(rnp->exp_tasks) != NULL ||
1210 ACCESS_ONCE(rnp->boost_tasks) != NULL; 1214 ACCESS_ONCE(rnp->boost_tasks) != NULL;
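The hunks above move RCU priority boosting from a per-task, on-stack rt_mutex to a per-rcu_node ->boost_mtx that rcu_boost() proxy-locks on behalf of the blocked reader, plus a ->boost_completion that keeps the mutex from being reused until the unboosted reader is completely done with it. Below is a minimal userspace sketch of that handoff pattern using POSIX threads; names such as boost_mtx and boost_done mirror the patch, but the pthread mechanics are illustrative only, and plain pthread mutexes do not provide the priority inheritance that the kernel's rt_mutex does.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t boost_mtx = PTHREAD_MUTEX_INITIALIZER;  /* models rnp->boost_mtx */
static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  state_cv = PTHREAD_COND_INITIALIZER;
static bool reader_owns_mtx;
static bool boost_done;                 /* models rnp->boost_completion */

static void *reader(void *arg)
{
	/* The blocked reader ends up owning boost_mtx; the kernel arranges
	 * this by proxy via rt_mutex_init_proxy_locked(), here the thread
	 * simply takes the lock itself. */
	pthread_mutex_lock(&boost_mtx);
	pthread_mutex_lock(&state_lock);
	reader_owns_mtx = true;
	pthread_cond_signal(&state_cv);
	pthread_mutex_unlock(&state_lock);

	/* ... finish the RCU read-side critical section ... */

	pthread_mutex_unlock(&boost_mtx);       /* as in rcu_read_unlock_special() */
	pthread_mutex_lock(&state_lock);
	boost_done = true;                      /* as in complete(&rnp->boost_completion) */
	pthread_cond_signal(&state_cv);
	pthread_mutex_unlock(&state_lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, reader, NULL);
	pthread_mutex_lock(&state_lock);
	while (!reader_owns_mtx)
		pthread_cond_wait(&state_cv, &state_lock);
	pthread_mutex_unlock(&state_lock);

	/* Booster: block until the reader drops boost_mtx.  In the kernel
	 * this rt_mutex_lock() is the step that lends the booster's
	 * priority to the reader; only the handoff ordering is modeled here. */
	pthread_mutex_lock(&boost_mtx);
	pthread_mutex_unlock(&boost_mtx);

	/* Do not reuse or reinitialize boost_mtx for the next boost cycle
	 * until the reader is completely done with it. */
	pthread_mutex_lock(&state_lock);
	while (!boost_done)
		pthread_cond_wait(&state_cv, &state_lock);
	pthread_mutex_unlock(&state_lock);

	pthread_join(t, NULL);
	printf("boost handoff complete\n");
	return 0;
}

The point of the completion is ordering: the booster must not set up boost_mtx for the next boost cycle until the reader's unlock has fully finished with it.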
@@ -1256,6 +1260,7 @@ static int rcu_boost_kthread(void *arg)
1256 * about it going away. 1260 * about it going away.
1257 */ 1261 */
1258static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) 1262static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
1263 __releases(rnp->lock)
1259{ 1264{
1260 struct task_struct *t; 1265 struct task_struct *t;
1261 1266
@@ -1491,6 +1496,7 @@ static void rcu_prepare_kthreads(int cpu)
1491#else /* #ifdef CONFIG_RCU_BOOST */ 1496#else /* #ifdef CONFIG_RCU_BOOST */
1492 1497
1493static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) 1498static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
1499 __releases(rnp->lock)
1494{ 1500{
1495 raw_spin_unlock_irqrestore(&rnp->lock, flags); 1501 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1496} 1502}
@@ -2060,6 +2066,22 @@ bool rcu_is_nocb_cpu(int cpu)
2060#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */ 2066#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
2061 2067
2062/* 2068/*
2069 * Kick the leader kthread for this NOCB group.
2070 */
2071static void wake_nocb_leader(struct rcu_data *rdp, bool force)
2072{
2073 struct rcu_data *rdp_leader = rdp->nocb_leader;
2074
2075 if (!ACCESS_ONCE(rdp_leader->nocb_kthread))
2076 return;
2077 if (!ACCESS_ONCE(rdp_leader->nocb_leader_wake) || force) {
2078 /* Prior xchg orders against prior callback enqueue. */
2079 ACCESS_ONCE(rdp_leader->nocb_leader_wake) = true;
2080 wake_up(&rdp_leader->nocb_wq);
2081 }
2082}
2083
2084/*
2063 * Enqueue the specified string of rcu_head structures onto the specified 2085 * Enqueue the specified string of rcu_head structures onto the specified
2064 * CPU's no-CBs lists. The CPU is specified by rdp, the head of the 2086 * CPU's no-CBs lists. The CPU is specified by rdp, the head of the
2065 * string by rhp, and the tail of the string by rhtp. The non-lazy/lazy 2087 * string by rhp, and the tail of the string by rhtp. The non-lazy/lazy
@@ -2093,7 +2115,8 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
2093 len = atomic_long_read(&rdp->nocb_q_count); 2115 len = atomic_long_read(&rdp->nocb_q_count);
2094 if (old_rhpp == &rdp->nocb_head) { 2116 if (old_rhpp == &rdp->nocb_head) {
2095 if (!irqs_disabled_flags(flags)) { 2117 if (!irqs_disabled_flags(flags)) {
2096 wake_up(&rdp->nocb_wq); /* ... if queue was empty ... */ 2118 /* ... if queue was empty ... */
2119 wake_nocb_leader(rdp, false);
2097 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, 2120 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
2098 TPS("WakeEmpty")); 2121 TPS("WakeEmpty"));
2099 } else { 2122 } else {
@@ -2103,7 +2126,8 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
2103 } 2126 }
2104 rdp->qlen_last_fqs_check = 0; 2127 rdp->qlen_last_fqs_check = 0;
2105 } else if (len > rdp->qlen_last_fqs_check + qhimark) { 2128 } else if (len > rdp->qlen_last_fqs_check + qhimark) {
2106 wake_up_process(t); /* ... or if many callbacks queued. */ 2129 /* ... or if many callbacks queued. */
2130 wake_nocb_leader(rdp, true);
2107 rdp->qlen_last_fqs_check = LONG_MAX / 2; 2131 rdp->qlen_last_fqs_check = LONG_MAX / 2;
2108 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WakeOvf")); 2132 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WakeOvf"));
2109 } else { 2133 } else {
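These enqueue-path hunks replace the direct wake_up()/wake_up_process() calls with wake_nocb_leader(), which skips the wakeup when the leader is already flagged awake and forces it when the callback list has grown past qhimark. A small userspace model of that gating, with hypothetical stand-ins for ->nocb_leader_wake and the wait-queue wakeup:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool leader_wake;         /* stands in for rdp_leader->nocb_leader_wake */

static void wake_leader(void)           /* stands in for wake_up(&rdp_leader->nocb_wq) */
{
	puts("wake_up(leader)");
}

/* Gating logic of wake_nocb_leader(): skip the wakeup when the leader is
 * already flagged awake, unless the caller forces it. */
static void wake_nocb_leader_model(bool force)
{
	if (!atomic_load(&leader_wake) || force) {
		atomic_store(&leader_wake, true);
		wake_leader();
	}
}

int main(void)
{
	wake_nocb_leader_model(false);  /* first callback on an empty list: wakes */
	wake_nocb_leader_model(false);  /* leader already awake: no redundant wakeup */
	wake_nocb_leader_model(true);   /* qhimark overflow path: forced wakeup */
	return 0;
}

The flag is deliberately left set across the leader's grace-period wait (see nocb_leader_wait() below, "We left ->nocb_leader_wake set to reduce cache thrashing"), so repeated enqueues do not generate redundant wakeups.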
@@ -2213,13 +2237,150 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
2213} 2237}
2214 2238
2215/* 2239/*
2240 * Leaders come here to wait for additional callbacks to show up.
2241 * This function does not return until callbacks appear.
2242 */
2243static void nocb_leader_wait(struct rcu_data *my_rdp)
2244{
2245 bool firsttime = true;
2246 bool gotcbs;
2247 struct rcu_data *rdp;
2248 struct rcu_head **tail;
2249
2250wait_again:
2251
2252 /* Wait for callbacks to appear. */
2253 if (!rcu_nocb_poll) {
2254 trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, "Sleep");
2255 wait_event_interruptible(my_rdp->nocb_wq,
2256 ACCESS_ONCE(my_rdp->nocb_leader_wake));
2257 /* Memory barrier handled by smp_mb() calls below and repoll. */
2258 } else if (firsttime) {
2259 firsttime = false; /* Don't drown trace log with "Poll"! */
2260 trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, "Poll");
2261 }
2262
2263 /*
2264 * Each pass through the following loop checks a follower for CBs.
2265 * We are our own first follower. Any CBs found are moved to
2266 * nocb_gp_head, where they await a grace period.
2267 */
2268 gotcbs = false;
2269 for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) {
2270 rdp->nocb_gp_head = ACCESS_ONCE(rdp->nocb_head);
2271 if (!rdp->nocb_gp_head)
2272 continue; /* No CBs here, try next follower. */
2273
2274 /* Move callbacks to wait-for-GP list, which is empty. */
2275 ACCESS_ONCE(rdp->nocb_head) = NULL;
2276 rdp->nocb_gp_tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
2277 rdp->nocb_gp_count = atomic_long_xchg(&rdp->nocb_q_count, 0);
2278 rdp->nocb_gp_count_lazy =
2279 atomic_long_xchg(&rdp->nocb_q_count_lazy, 0);
2280 gotcbs = true;
2281 }
2282
2283 /*
2284 * If there were no callbacks, sleep a bit, rescan after a
2285 * memory barrier, and go retry.
2286 */
2287 if (unlikely(!gotcbs)) {
2288 if (!rcu_nocb_poll)
2289 trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu,
2290 "WokeEmpty");
2291 flush_signals(current);
2292 schedule_timeout_interruptible(1);
2293
2294 /* Rescan in case we were a victim of memory ordering. */
2295 my_rdp->nocb_leader_wake = false;
2296 smp_mb(); /* Ensure _wake false before scan. */
2297 for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower)
2298 if (ACCESS_ONCE(rdp->nocb_head)) {
2299 /* Found CB, so short-circuit next wait. */
2300 my_rdp->nocb_leader_wake = true;
2301 break;
2302 }
2303 goto wait_again;
2304 }
2305
2306 /* Wait for one grace period. */
2307 rcu_nocb_wait_gp(my_rdp);
2308
2309 /*
2310 * We left ->nocb_leader_wake set to reduce cache thrashing.
2311 * We clear it now, but recheck for new callbacks while
2312 * traversing our follower list.
2313 */
2314 my_rdp->nocb_leader_wake = false;
2315 smp_mb(); /* Ensure _wake false before scan of ->nocb_head. */
2316
2317 /* Each pass through the following loop wakes a follower, if needed. */
2318 for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) {
2319 if (ACCESS_ONCE(rdp->nocb_head))
2320 my_rdp->nocb_leader_wake = true; /* No need to wait. */
2321 if (!rdp->nocb_gp_head)
2322 continue; /* No CBs, so no need to wake follower. */
2323
2324 /* Append callbacks to follower's "done" list. */
2325 tail = xchg(&rdp->nocb_follower_tail, rdp->nocb_gp_tail);
2326 *tail = rdp->nocb_gp_head;
2327 atomic_long_add(rdp->nocb_gp_count, &rdp->nocb_follower_count);
2328 atomic_long_add(rdp->nocb_gp_count_lazy,
2329 &rdp->nocb_follower_count_lazy);
2330 if (rdp != my_rdp && tail == &rdp->nocb_follower_head) {
2331 /*
2332 * List was empty, wake up the follower.
2333 * Memory barriers supplied by atomic_long_add().
2334 */
2335 wake_up(&rdp->nocb_wq);
2336 }
2337 }
2338
2339 /* If we (the leader) don't have CBs, go wait some more. */
2340 if (!my_rdp->nocb_follower_head)
2341 goto wait_again;
2342}
2343
2344/*
2345 * Followers come here to wait for additional callbacks to show up.
2346 * This function does not return until callbacks appear.
2347 */
2348static void nocb_follower_wait(struct rcu_data *rdp)
2349{
2350 bool firsttime = true;
2351
2352 for (;;) {
2353 if (!rcu_nocb_poll) {
2354 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
2355 "FollowerSleep");
2356 wait_event_interruptible(rdp->nocb_wq,
2357 ACCESS_ONCE(rdp->nocb_follower_head));
2358 } else if (firsttime) {
2359 /* Don't drown trace log with "Poll"! */
2360 firsttime = false;
2361 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "Poll");
2362 }
2363 if (smp_load_acquire(&rdp->nocb_follower_head)) {
2364 /* ^^^ Ensure CB invocation follows _head test. */
2365 return;
2366 }
2367 if (!rcu_nocb_poll)
2368 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
2369 "WokeEmpty");
2370 flush_signals(current);
2371 schedule_timeout_interruptible(1);
2372 }
2373}
2374
2375/*
2216 * Per-rcu_data kthread, but only for no-CBs CPUs. Each kthread invokes 2376 * Per-rcu_data kthread, but only for no-CBs CPUs. Each kthread invokes
2217 * callbacks queued by the corresponding no-CBs CPU. 2377 * callbacks queued by the corresponding no-CBs CPU, however, there is
2378 * an optional leader-follower relationship so that the grace-period
2379 * kthreads don't have to do quite so many wakeups.
2218 */ 2380 */
2219static int rcu_nocb_kthread(void *arg) 2381static int rcu_nocb_kthread(void *arg)
2220{ 2382{
2221 int c, cl; 2383 int c, cl;
2222 bool firsttime = 1;
2223 struct rcu_head *list; 2384 struct rcu_head *list;
2224 struct rcu_head *next; 2385 struct rcu_head *next;
2225 struct rcu_head **tail; 2386 struct rcu_head **tail;
@@ -2227,41 +2388,22 @@ static int rcu_nocb_kthread(void *arg)
2227 2388
2228 /* Each pass through this loop invokes one batch of callbacks */ 2389 /* Each pass through this loop invokes one batch of callbacks */
2229 for (;;) { 2390 for (;;) {
2230 /* If not polling, wait for next batch of callbacks. */ 2391 /* Wait for callbacks. */
2231 if (!rcu_nocb_poll) { 2392 if (rdp->nocb_leader == rdp)
2232 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, 2393 nocb_leader_wait(rdp);
2233 TPS("Sleep")); 2394 else
2234 wait_event_interruptible(rdp->nocb_wq, rdp->nocb_head); 2395 nocb_follower_wait(rdp);
2235 /* Memory barrier provide by xchg() below. */ 2396
2236 } else if (firsttime) { 2397 /* Pull the ready-to-invoke callbacks onto local list. */
2237 firsttime = 0; 2398 list = ACCESS_ONCE(rdp->nocb_follower_head);
2238 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, 2399 BUG_ON(!list);
2239 TPS("Poll")); 2400 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "WokeNonEmpty");
2240 } 2401 ACCESS_ONCE(rdp->nocb_follower_head) = NULL;
2241 list = ACCESS_ONCE(rdp->nocb_head); 2402 tail = xchg(&rdp->nocb_follower_tail, &rdp->nocb_follower_head);
2242 if (!list) { 2403 c = atomic_long_xchg(&rdp->nocb_follower_count, 0);
2243 if (!rcu_nocb_poll) 2404 cl = atomic_long_xchg(&rdp->nocb_follower_count_lazy, 0);
2244 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, 2405 rdp->nocb_p_count += c;
2245 TPS("WokeEmpty")); 2406 rdp->nocb_p_count_lazy += cl;
2246 schedule_timeout_interruptible(1);
2247 flush_signals(current);
2248 continue;
2249 }
2250 firsttime = 1;
2251 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
2252 TPS("WokeNonEmpty"));
2253
2254 /*
2255 * Extract queued callbacks, update counts, and wait
2256 * for a grace period to elapse.
2257 */
2258 ACCESS_ONCE(rdp->nocb_head) = NULL;
2259 tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
2260 c = atomic_long_xchg(&rdp->nocb_q_count, 0);
2261 cl = atomic_long_xchg(&rdp->nocb_q_count_lazy, 0);
2262 ACCESS_ONCE(rdp->nocb_p_count) += c;
2263 ACCESS_ONCE(rdp->nocb_p_count_lazy) += cl;
2264 rcu_nocb_wait_gp(rdp);
2265 2407
2266 /* Each pass through the following loop invokes a callback. */ 2408 /* Each pass through the following loop invokes a callback. */
2267 trace_rcu_batch_start(rdp->rsp->name, cl, c, -1); 2409 trace_rcu_batch_start(rdp->rsp->name, cl, c, -1);
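With these hunks the kthread splits into nocb_leader_wait() and nocb_follower_wait(), and callbacks flow through two tail-pointer queues: the per-CPU ->nocb_head list feeds the leader's ->nocb_gp_head staging list, which after a grace period is appended to each follower's ->nocb_follower_head "done" list using the *tail/xchg idiom shown above. A single-threaded sketch of that O(1) splice (struct cb stands in for struct rcu_head; the real code uses xchg() and atomic counters for the cross-CPU cases):

#include <stdio.h>

/* Simplified, single-threaded model of the tail-pointer queues used by
 * the no-CBs code. */
struct cb { int id; struct cb *next; };

struct queue { struct cb *head; struct cb **tail; };

static void queue_init(struct queue *q)
{
	q->head = NULL;
	q->tail = &q->head;
}

/* Models the "old_tail = xchg(&q->tail, new_tail); *old_tail = list"
 * idiom: splice a whole list (head, tail) onto q in O(1). */
static void queue_splice(struct queue *q, struct cb *head, struct cb **tail)
{
	struct cb **old_tail = q->tail;
	q->tail = tail;
	*old_tail = head;
}

int main(void)
{
	struct queue pending, done;
	struct cb cbs[3] = { {1, NULL}, {2, NULL}, {3, NULL} };
	struct cb *c;
	int i;

	queue_init(&pending);
	queue_init(&done);

	/* Enqueue three callbacks as __call_rcu_nocb_enqueue() would. */
	for (i = 0; i < 3; i++)
		queue_splice(&pending, &cbs[i], &cbs[i].next);

	/* Leader: harvest the pending list, and after a grace period
	 * append it to the follower's done list. */
	queue_splice(&done, pending.head, pending.tail);
	queue_init(&pending);

	for (c = done.head; c; c = c->next)
		printf("invoke callback %d\n", c->id);
	return 0;
}

The same idiom appears twice in the patch: once when the leader harvests a follower's pending callbacks into ->nocb_gp_head, and once when it appends the post-grace-period list to the follower's ->nocb_follower_head.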
@@ -2305,7 +2447,7 @@ static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
2305 if (!rcu_nocb_need_deferred_wakeup(rdp)) 2447 if (!rcu_nocb_need_deferred_wakeup(rdp))
2306 return; 2448 return;
2307 ACCESS_ONCE(rdp->nocb_defer_wakeup) = false; 2449 ACCESS_ONCE(rdp->nocb_defer_wakeup) = false;
2308 wake_up(&rdp->nocb_wq); 2450 wake_nocb_leader(rdp, false);
2309 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWakeEmpty")); 2451 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWakeEmpty"));
2310} 2452}
2311 2453
@@ -2314,19 +2456,57 @@ static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
2314{ 2456{
2315 rdp->nocb_tail = &rdp->nocb_head; 2457 rdp->nocb_tail = &rdp->nocb_head;
2316 init_waitqueue_head(&rdp->nocb_wq); 2458 init_waitqueue_head(&rdp->nocb_wq);
2459 rdp->nocb_follower_tail = &rdp->nocb_follower_head;
2317} 2460}
2318 2461
2319/* Create a kthread for each RCU flavor for each no-CBs CPU. */ 2462/* How many follower CPU IDs per leader? Default of -1 for sqrt(nr_cpu_ids). */
2463static int rcu_nocb_leader_stride = -1;
2464module_param(rcu_nocb_leader_stride, int, 0444);
2465
2466/*
2467 * Create a kthread for each RCU flavor for each no-CBs CPU.
2468 * Also initialize leader-follower relationships.
2469 */
2320static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp) 2470static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
2321{ 2471{
2322 int cpu; 2472 int cpu;
2473 int ls = rcu_nocb_leader_stride;
2474 int nl = 0; /* Next leader. */
2323 struct rcu_data *rdp; 2475 struct rcu_data *rdp;
2476 struct rcu_data *rdp_leader = NULL; /* Suppress misguided gcc warn. */
2477 struct rcu_data *rdp_prev = NULL;
2324 struct task_struct *t; 2478 struct task_struct *t;
2325 2479
2326 if (rcu_nocb_mask == NULL) 2480 if (rcu_nocb_mask == NULL)
2327 return; 2481 return;
2482#if defined(CONFIG_NO_HZ_FULL) && !defined(CONFIG_NO_HZ_FULL_ALL)
2483 if (tick_nohz_full_running)
2484 cpumask_or(rcu_nocb_mask, rcu_nocb_mask, tick_nohz_full_mask);
2485#endif /* #if defined(CONFIG_NO_HZ_FULL) && !defined(CONFIG_NO_HZ_FULL_ALL) */
2486 if (ls == -1) {
2487 ls = int_sqrt(nr_cpu_ids);
2488 rcu_nocb_leader_stride = ls;
2489 }
2490
2491 /*
2492 * Each pass through this loop sets up one rcu_data structure and
2493 * spawns one rcu_nocb_kthread().
2494 */
2328 for_each_cpu(cpu, rcu_nocb_mask) { 2495 for_each_cpu(cpu, rcu_nocb_mask) {
2329 rdp = per_cpu_ptr(rsp->rda, cpu); 2496 rdp = per_cpu_ptr(rsp->rda, cpu);
2497 if (rdp->cpu >= nl) {
2498 /* New leader, set up for followers & next leader. */
2499 nl = DIV_ROUND_UP(rdp->cpu + 1, ls) * ls;
2500 rdp->nocb_leader = rdp;
2501 rdp_leader = rdp;
2502 } else {
2503 /* Another follower, link to previous leader. */
2504 rdp->nocb_leader = rdp_leader;
2505 rdp_prev->nocb_next_follower = rdp;
2506 }
2507 rdp_prev = rdp;
2508
2509 /* Spawn the kthread for this CPU. */
2330 t = kthread_run(rcu_nocb_kthread, rdp, 2510 t = kthread_run(rcu_nocb_kthread, rdp,
2331 "rcuo%c/%d", rsp->abbr, cpu); 2511 "rcuo%c/%d", rsp->abbr, cpu);
2332 BUG_ON(IS_ERR(t)); 2512 BUG_ON(IS_ERR(t));
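rcu_spawn_nocb_kthreads() now groups the no-CBs CPUs into leader/follower sets whose size defaults to int_sqrt(nr_cpu_ids) and can be overridden via the rcu_nocb_leader_stride module parameter. A worked example of the grouping, under the assumption of a 16-CPU system in which every CPU is a no-CBs CPU (the real loop walks only rcu_nocb_mask):

#include <math.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int nr_cpu_ids = 16;            /* assumed example size */
	int ls = (int)sqrt(nr_cpu_ids); /* stands in for int_sqrt() */
	int nl = 0;                     /* next leader, as in the patch */
	int cpu, leader = -1;

	for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
		if (cpu >= nl) {
			/* New leader, set up for followers & next leader. */
			nl = DIV_ROUND_UP(cpu + 1, ls) * ls;
			leader = cpu;
			printf("CPU %2d: leader (group runs up to CPU %d)\n",
			       cpu, nl - 1);
		} else {
			printf("CPU %2d: follower of CPU %d\n", cpu, leader);
		}
	}
	return 0;
}

With nr_cpu_ids = 16 the stride is 4, so CPUs 0, 4, 8, and 12 become leaders and each leads the three CPUs after it. The sqrt default keeps both the number of leaders and the number of followers per leader near sqrt(nr_cpu_ids), which is what limits how many per-CPU kthreads the grace-period machinery has to wake directly.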
@@ -2404,7 +2584,7 @@ static bool init_nocb_callback_list(struct rcu_data *rdp)
2404 * if an adaptive-ticks CPU is failing to respond to the current grace 2584 * if an adaptive-ticks CPU is failing to respond to the current grace
2405 * period and has not be idle from an RCU perspective, kick it. 2585 * period and has not be idle from an RCU perspective, kick it.
2406 */ 2586 */
2407static void rcu_kick_nohz_cpu(int cpu) 2587static void __maybe_unused rcu_kick_nohz_cpu(int cpu)
2408{ 2588{
2409#ifdef CONFIG_NO_HZ_FULL 2589#ifdef CONFIG_NO_HZ_FULL
2410 if (tick_nohz_full_cpu(cpu)) 2590 if (tick_nohz_full_cpu(cpu))
@@ -2843,12 +3023,16 @@ static bool rcu_nohz_full_cpu(struct rcu_state *rsp)
2843 */ 3023 */
2844static void rcu_bind_gp_kthread(void) 3024static void rcu_bind_gp_kthread(void)
2845{ 3025{
2846#ifdef CONFIG_NO_HZ_FULL 3026 int __maybe_unused cpu;
2847 int cpu = ACCESS_ONCE(tick_do_timer_cpu);
2848 3027
2849 if (cpu < 0 || cpu >= nr_cpu_ids) 3028 if (!tick_nohz_full_enabled())
2850 return; 3029 return;
2851 if (raw_smp_processor_id() != cpu) 3030#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
3031 cpu = tick_do_timer_cpu;
3032 if (cpu >= 0 && cpu < nr_cpu_ids && raw_smp_processor_id() != cpu)
2852 set_cpus_allowed_ptr(current, cpumask_of(cpu)); 3033 set_cpus_allowed_ptr(current, cpumask_of(cpu));
2853#endif /* #ifdef CONFIG_NO_HZ_FULL */ 3034#else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
3035 if (!is_housekeeping_cpu(raw_smp_processor_id()))
3036 housekeeping_affine(current);
3037#endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
2854} 3038}
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index a2aeb4df0f60..4056d7992a6c 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -90,9 +90,6 @@ void __rcu_read_unlock(void)
90 } else { 90 } else {
91 barrier(); /* critical section before exit code. */ 91 barrier(); /* critical section before exit code. */
92 t->rcu_read_lock_nesting = INT_MIN; 92 t->rcu_read_lock_nesting = INT_MIN;
93#ifdef CONFIG_PROVE_RCU_DELAY
94 udelay(10); /* Make preemption more probable. */
95#endif /* #ifdef CONFIG_PROVE_RCU_DELAY */
96 barrier(); /* assign before ->rcu_read_unlock_special load */ 93 barrier(); /* assign before ->rcu_read_unlock_special load */
97 if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special))) 94 if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
98 rcu_read_unlock_special(t); 95 rcu_read_unlock_special(t);
@@ -200,12 +197,12 @@ void wait_rcu_gp(call_rcu_func_t crf)
200EXPORT_SYMBOL_GPL(wait_rcu_gp); 197EXPORT_SYMBOL_GPL(wait_rcu_gp);
201 198
202#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD 199#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
203static inline void debug_init_rcu_head(struct rcu_head *head) 200void init_rcu_head(struct rcu_head *head)
204{ 201{
205 debug_object_init(head, &rcuhead_debug_descr); 202 debug_object_init(head, &rcuhead_debug_descr);
206} 203}
207 204
208static inline void debug_rcu_head_free(struct rcu_head *head) 205void destroy_rcu_head(struct rcu_head *head)
209{ 206{
210 debug_object_free(head, &rcuhead_debug_descr); 207 debug_object_free(head, &rcuhead_debug_descr);
211} 208}
@@ -350,21 +347,3 @@ static int __init check_cpu_stall_init(void)
350early_initcall(check_cpu_stall_init); 347early_initcall(check_cpu_stall_init);
351 348
352#endif /* #ifdef CONFIG_RCU_STALL_COMMON */ 349#endif /* #ifdef CONFIG_RCU_STALL_COMMON */
353
354/*
355 * Hooks for cond_resched() and friends to avoid RCU CPU stall warnings.
356 */
357
358DEFINE_PER_CPU(int, rcu_cond_resched_count);
359
360/*
361 * Report a set of RCU quiescent states, for use by cond_resched()
362 * and friends. Out of line due to being called infrequently.
363 */
364void rcu_resched(void)
365{
366 preempt_disable();
367 __this_cpu_write(rcu_cond_resched_count, 0);
368 rcu_note_context_switch(smp_processor_id());
369 preempt_enable();
370}
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 3bdf01b494fe..bc1638b33449 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4147,7 +4147,6 @@ static void __cond_resched(void)
4147 4147
4148int __sched _cond_resched(void) 4148int __sched _cond_resched(void)
4149{ 4149{
4150 rcu_cond_resched();
4151 if (should_resched()) { 4150 if (should_resched()) {
4152 __cond_resched(); 4151 __cond_resched();
4153 return 1; 4152 return 1;
@@ -4166,18 +4165,15 @@ EXPORT_SYMBOL(_cond_resched);
4166 */ 4165 */
4167int __cond_resched_lock(spinlock_t *lock) 4166int __cond_resched_lock(spinlock_t *lock)
4168{ 4167{
4169 bool need_rcu_resched = rcu_should_resched();
4170 int resched = should_resched(); 4168 int resched = should_resched();
4171 int ret = 0; 4169 int ret = 0;
4172 4170
4173 lockdep_assert_held(lock); 4171 lockdep_assert_held(lock);
4174 4172
4175 if (spin_needbreak(lock) || resched || need_rcu_resched) { 4173 if (spin_needbreak(lock) || resched) {
4176 spin_unlock(lock); 4174 spin_unlock(lock);
4177 if (resched) 4175 if (resched)
4178 __cond_resched(); 4176 __cond_resched();
4179 else if (unlikely(need_rcu_resched))
4180 rcu_resched();
4181 else 4177 else
4182 cpu_relax(); 4178 cpu_relax();
4183 ret = 1; 4179 ret = 1;
@@ -4191,7 +4187,6 @@ int __sched __cond_resched_softirq(void)
4191{ 4187{
4192 BUG_ON(!in_softirq()); 4188 BUG_ON(!in_softirq());
4193 4189
4194 rcu_cond_resched(); /* BH disabled OK, just recording QSes. */
4195 if (should_resched()) { 4190 if (should_resched()) {
4196 local_bh_enable(); 4191 local_bh_enable();
4197 __cond_resched(); 4192 __cond_resched();
diff --git a/kernel/signal.c b/kernel/signal.c
index a4077e90f19f..40b76e351e64 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1263,6 +1263,10 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
1263 struct sighand_struct *sighand; 1263 struct sighand_struct *sighand;
1264 1264
1265 for (;;) { 1265 for (;;) {
1266 /*
1267 * Disable interrupts early to avoid deadlocks.
1268 * See rcu_read_unlock() comment header for details.
1269 */
1266 local_irq_save(*flags); 1270 local_irq_save(*flags);
1267 rcu_read_lock(); 1271 rcu_read_lock();
1268 sighand = rcu_dereference(tsk->sighand); 1272 sighand = rcu_dereference(tsk->sighand);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 6558b7ac112d..f784d83e29f1 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -154,6 +154,7 @@ static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
154 154
155#ifdef CONFIG_NO_HZ_FULL 155#ifdef CONFIG_NO_HZ_FULL
156cpumask_var_t tick_nohz_full_mask; 156cpumask_var_t tick_nohz_full_mask;
157cpumask_var_t housekeeping_mask;
157bool tick_nohz_full_running; 158bool tick_nohz_full_running;
158 159
159static bool can_stop_full_tick(void) 160static bool can_stop_full_tick(void)
@@ -281,6 +282,7 @@ static int __init tick_nohz_full_setup(char *str)
281 int cpu; 282 int cpu;
282 283
283 alloc_bootmem_cpumask_var(&tick_nohz_full_mask); 284 alloc_bootmem_cpumask_var(&tick_nohz_full_mask);
285 alloc_bootmem_cpumask_var(&housekeeping_mask);
284 if (cpulist_parse(str, tick_nohz_full_mask) < 0) { 286 if (cpulist_parse(str, tick_nohz_full_mask) < 0) {
285 pr_warning("NOHZ: Incorrect nohz_full cpumask\n"); 287 pr_warning("NOHZ: Incorrect nohz_full cpumask\n");
286 return 1; 288 return 1;
@@ -291,6 +293,8 @@ static int __init tick_nohz_full_setup(char *str)
291 pr_warning("NO_HZ: Clearing %d from nohz_full range for timekeeping\n", cpu); 293 pr_warning("NO_HZ: Clearing %d from nohz_full range for timekeeping\n", cpu);
292 cpumask_clear_cpu(cpu, tick_nohz_full_mask); 294 cpumask_clear_cpu(cpu, tick_nohz_full_mask);
293 } 295 }
296 cpumask_andnot(housekeeping_mask,
297 cpu_possible_mask, tick_nohz_full_mask);
294 tick_nohz_full_running = true; 298 tick_nohz_full_running = true;
295 299
296 return 1; 300 return 1;
@@ -332,9 +336,15 @@ static int tick_nohz_init_all(void)
332 pr_err("NO_HZ: Can't allocate full dynticks cpumask\n"); 336 pr_err("NO_HZ: Can't allocate full dynticks cpumask\n");
333 return err; 337 return err;
334 } 338 }
339 if (!alloc_cpumask_var(&housekeeping_mask, GFP_KERNEL)) {
340 pr_err("NO_HZ: Can't allocate not-full dynticks cpumask\n");
341 return err;
342 }
335 err = 0; 343 err = 0;
336 cpumask_setall(tick_nohz_full_mask); 344 cpumask_setall(tick_nohz_full_mask);
337 cpumask_clear_cpu(smp_processor_id(), tick_nohz_full_mask); 345 cpumask_clear_cpu(smp_processor_id(), tick_nohz_full_mask);
346 cpumask_clear(housekeeping_mask);
347 cpumask_set_cpu(smp_processor_id(), housekeeping_mask);
338 tick_nohz_full_running = true; 348 tick_nohz_full_running = true;
339#endif 349#endif
340 return err; 350 return err;
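The tick-sched changes introduce housekeeping_mask as the complement of tick_nohz_full_mask within cpu_possible_mask, so code such as the new housekeeping_affine() use in rcu_bind_gp_kthread() can push kthreads onto CPUs that keep the scheduling-clock tick. A tiny sketch of that set computation, with plain bitmasks standing in for struct cpumask (the kernel uses cpumask_andnot()):

#include <stdio.h>

int main(void)
{
	/* Assumed 8-CPU example; bit i set means CPU i is in the mask. */
	unsigned int cpu_possible_mask = 0xff;          /* CPUs 0-7 */
	unsigned int tick_nohz_full_mask = 0xfe;        /* nohz_full covers CPUs 1-7 */
	unsigned int housekeeping_mask;

	/* cpumask_andnot(housekeeping_mask, cpu_possible_mask, tick_nohz_full_mask) */
	housekeeping_mask = cpu_possible_mask & ~tick_nohz_full_mask;
	printf("housekeeping_mask = 0x%02x (CPU 0 only)\n", housekeeping_mask);
	return 0;
}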
diff --git a/kernel/torture.c b/kernel/torture.c
index 40bb511cca48..d600af21f022 100644
--- a/kernel/torture.c
+++ b/kernel/torture.c
@@ -708,7 +708,7 @@ int _torture_create_kthread(int (*fn)(void *arg), void *arg, char *s, char *m,
708 int ret = 0; 708 int ret = 0;
709 709
710 VERBOSE_TOROUT_STRING(m); 710 VERBOSE_TOROUT_STRING(m);
711 *tp = kthread_run(fn, arg, s); 711 *tp = kthread_run(fn, arg, "%s", s);
712 if (IS_ERR(*tp)) { 712 if (IS_ERR(*tp)) {
713 ret = PTR_ERR(*tp); 713 ret = PTR_ERR(*tp);
714 VERBOSE_TOROUT_ERRSTRING(f); 714 VERBOSE_TOROUT_ERRSTRING(f);
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 7a638aa3545b..12132e433e30 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1131,20 +1131,6 @@ config PROVE_RCU_REPEATEDLY
1131 1131
1132 Say N if you are unsure. 1132 Say N if you are unsure.
1133 1133
1134config PROVE_RCU_DELAY
1135 bool "RCU debugging: preemptible RCU race provocation"
1136 depends on DEBUG_KERNEL && PREEMPT_RCU
1137 default n
1138 help
1139 There is a class of races that involve an unlikely preemption
1140 of __rcu_read_unlock() just after ->rcu_read_lock_nesting has
1141 been set to INT_MIN. This feature inserts a delay at that
1142 point to increase the probability of these races.
1143
1144 Say Y to increase probability of preemption of __rcu_read_unlock().
1145
1146 Say N if you are unsure.
1147
1148config SPARSE_RCU_POINTER 1134config SPARSE_RCU_POINTER
1149 bool "RCU debugging: sparse-based checks for pointer usage" 1135 bool "RCU debugging: sparse-based checks for pointer usage"
1150 default n 1136 default n
diff --git a/scripts/get_maintainer.pl b/scripts/get_maintainer.pl
index 41987885bd31..d7016279ec2b 100755
--- a/scripts/get_maintainer.pl
+++ b/scripts/get_maintainer.pl
@@ -21,6 +21,7 @@ my $lk_path = "./";
21my $email = 1; 21my $email = 1;
22my $email_usename = 1; 22my $email_usename = 1;
23my $email_maintainer = 1; 23my $email_maintainer = 1;
24my $email_reviewer = 1;
24my $email_list = 1; 25my $email_list = 1;
25my $email_subscriber_list = 0; 26my $email_subscriber_list = 0;
26my $email_git_penguin_chiefs = 0; 27my $email_git_penguin_chiefs = 0;
@@ -202,6 +203,7 @@ if (!GetOptions(
202 'remove-duplicates!' => \$email_remove_duplicates, 203 'remove-duplicates!' => \$email_remove_duplicates,
203 'mailmap!' => \$email_use_mailmap, 204 'mailmap!' => \$email_use_mailmap,
204 'm!' => \$email_maintainer, 205 'm!' => \$email_maintainer,
206 'r!' => \$email_reviewer,
205 'n!' => \$email_usename, 207 'n!' => \$email_usename,
206 'l!' => \$email_list, 208 'l!' => \$email_list,
207 's!' => \$email_subscriber_list, 209 's!' => \$email_subscriber_list,
@@ -260,7 +262,8 @@ if ($sections) {
260} 262}
261 263
262if ($email && 264if ($email &&
263 ($email_maintainer + $email_list + $email_subscriber_list + 265 ($email_maintainer + $email_reviewer +
266 $email_list + $email_subscriber_list +
264 $email_git + $email_git_penguin_chiefs + $email_git_blame) == 0) { 267 $email_git + $email_git_penguin_chiefs + $email_git_blame) == 0) {
265 die "$P: Please select at least 1 email option\n"; 268 die "$P: Please select at least 1 email option\n";
266} 269}
@@ -750,6 +753,7 @@ MAINTAINER field selection options:
750 --hg-since => hg history to use (default: $email_hg_since) 753 --hg-since => hg history to use (default: $email_hg_since)
751 --interactive => display a menu (mostly useful if used with the --git option) 754 --interactive => display a menu (mostly useful if used with the --git option)
752 --m => include maintainer(s) if any 755 --m => include maintainer(s) if any
756 --r => include reviewer(s) if any
753 --n => include name 'Full Name <addr\@domain.tld>' 757 --n => include name 'Full Name <addr\@domain.tld>'
754 --l => include list(s) if any 758 --l => include list(s) if any
755 --s => include subscriber only list(s) if any 759 --s => include subscriber only list(s) if any
@@ -1064,6 +1068,22 @@ sub add_categories {
1064 my $role = get_maintainer_role($i); 1068 my $role = get_maintainer_role($i);
1065 push_email_addresses($pvalue, $role); 1069 push_email_addresses($pvalue, $role);
1066 } 1070 }
1071 } elsif ($ptype eq "R") {
1072 my ($name, $address) = parse_email($pvalue);
1073 if ($name eq "") {
1074 if ($i > 0) {
1075 my $tv = $typevalue[$i - 1];
1076 if ($tv =~ m/^(\C):\s*(.*)/) {
1077 if ($1 eq "P") {
1078 $name = $2;
1079 $pvalue = format_email($name, $address, $email_usename);
1080 }
1081 }
1082 }
1083 }
1084 if ($email_reviewer) {
1085 push_email_addresses($pvalue, 'reviewer');
1086 }
1067 } elsif ($ptype eq "T") { 1087 } elsif ($ptype eq "T") {
1068 push(@scm, $pvalue); 1088 push(@scm, $pvalue);
1069 } elsif ($ptype eq "W") { 1089 } elsif ($ptype eq "W") {
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
index ee1f6cae3d70..3f6c9b78d177 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
@@ -54,10 +54,16 @@ do
54 if test -f "$i/qemu-cmd" 54 if test -f "$i/qemu-cmd"
55 then 55 then
56 print_bug qemu failed 56 print_bug qemu failed
57 echo " $i"
58 elif test -f "$i/buildonly"
59 then
60 echo Build-only run, no boot/test
61 configcheck.sh $i/.config $i/ConfigFragment
62 parse-build.sh $i/Make.out $configfile
57 else 63 else
58 print_bug Build failed 64 print_bug Build failed
65 echo " $i"
59 fi 66 fi
60 echo " $i"
61 fi 67 fi
62 done 68 done
63done 69done
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
index 27e544e29510..0f69dcbf9def 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
@@ -42,6 +42,7 @@ grace=120
42 42
43T=/tmp/kvm-test-1-run.sh.$$ 43T=/tmp/kvm-test-1-run.sh.$$
44trap 'rm -rf $T' 0 44trap 'rm -rf $T' 0
45touch $T
45 46
46. $KVM/bin/functions.sh 47. $KVM/bin/functions.sh
47. $KVPATH/ver_functions.sh 48. $KVPATH/ver_functions.sh
@@ -131,7 +132,10 @@ boot_args=$6
131 132
132cd $KVM 133cd $KVM
133kstarttime=`awk 'BEGIN { print systime() }' < /dev/null` 134kstarttime=`awk 'BEGIN { print systime() }' < /dev/null`
134echo ' ---' `date`: Starting kernel 135if test -z "$TORTURE_BUILDONLY"
136then
137 echo ' ---' `date`: Starting kernel
138fi
135 139
136# Generate -smp qemu argument. 140# Generate -smp qemu argument.
137qemu_args="-nographic $qemu_args" 141qemu_args="-nographic $qemu_args"
@@ -157,12 +161,13 @@ boot_args="`configfrag_boot_params "$boot_args" "$config_template"`"
157# Generate kernel-version-specific boot parameters 161# Generate kernel-version-specific boot parameters
158boot_args="`per_version_boot_params "$boot_args" $builddir/.config $seconds`" 162boot_args="`per_version_boot_params "$boot_args" $builddir/.config $seconds`"
159 163
160echo $QEMU $qemu_args -m 512 -kernel $builddir/$BOOT_IMAGE -append \"$qemu_append $boot_args\" > $resdir/qemu-cmd
161if test -n "$TORTURE_BUILDONLY" 164if test -n "$TORTURE_BUILDONLY"
162then 165then
163 echo Build-only run specified, boot/test omitted. 166 echo Build-only run specified, boot/test omitted.
167 touch $resdir/buildonly
164 exit 0 168 exit 0
165fi 169fi
170echo $QEMU $qemu_args -m 512 -kernel $builddir/$BOOT_IMAGE -append \"$qemu_append $boot_args\" > $resdir/qemu-cmd
166( $QEMU $qemu_args -m 512 -kernel $builddir/$BOOT_IMAGE -append "$qemu_append $boot_args"; echo $? > $resdir/qemu-retval ) & 171( $QEMU $qemu_args -m 512 -kernel $builddir/$BOOT_IMAGE -append "$qemu_append $boot_args"; echo $? > $resdir/qemu-retval ) &
167qemu_pid=$! 172qemu_pid=$!
168commandcompleted=0 173commandcompleted=0
diff --git a/tools/testing/selftests/rcutorture/bin/kvm.sh b/tools/testing/selftests/rcutorture/bin/kvm.sh
index 40285c58653e..589e9c38413b 100644
--- a/tools/testing/selftests/rcutorture/bin/kvm.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm.sh
@@ -340,12 +340,18 @@ function dump(first, pastlast)
340 for (j = 1; j < jn; j++) { 340 for (j = 1; j < jn; j++) {
341 builddir=KVM "/b" j 341 builddir=KVM "/b" j
342 print "rm -f " builddir ".ready" 342 print "rm -f " builddir ".ready"
343 print "echo ----", cfr[j], cpusr[j] ovf ": Starting kernel. `date`"; 343 print "if test -z \"$TORTURE_BUILDONLY\""
344 print "echo ----", cfr[j], cpusr[j] ovf ": Starting kernel. `date` >> " rd "/log"; 344 print "then"
345 print "\techo ----", cfr[j], cpusr[j] ovf ": Starting kernel. `date`";
346 print "\techo ----", cfr[j], cpusr[j] ovf ": Starting kernel. `date` >> " rd "/log";
347 print "fi"
345 } 348 }
346 print "wait" 349 print "wait"
347 print "echo ---- All kernel runs complete. `date`"; 350 print "if test -z \"$TORTURE_BUILDONLY\""
348 print "echo ---- All kernel runs complete. `date` >> " rd "/log"; 351 print "then"
352 print "\techo ---- All kernel runs complete. `date`";
353 print "\techo ---- All kernel runs complete. `date` >> " rd "/log";
354 print "fi"
349 for (j = 1; j < jn; j++) { 355 for (j = 1; j < jn; j++) {
350 builddir=KVM "/b" j 356 builddir=KVM "/b" j
351 print "echo ----", cfr[j], cpusr[j] ovf ": Build/run results:"; 357 print "echo ----", cfr[j], cpusr[j] ovf ": Build/run results:";
@@ -385,10 +391,7 @@ echo
385echo 391echo
386echo " --- `date` Test summary:" 392echo " --- `date` Test summary:"
387echo Results directory: $resdir/$ds 393echo Results directory: $resdir/$ds
388if test -z "$TORTURE_BUILDONLY" 394kvm-recheck.sh $resdir/$ds
389then
390 kvm-recheck.sh $resdir/$ds
391fi
392___EOF___ 395___EOF___
393 396
394if test "$dryrun" = script 397if test "$dryrun" = script
@@ -403,7 +406,7 @@ then
403 sed -e 's/:.*$//' -e 's/^echo //' 406 sed -e 's/:.*$//' -e 's/^echo //'
404 exit 0 407 exit 0
405else 408else
406 # Not a dryru, so run the script. 409 # Not a dryrun, so run the script.
407 sh $T/script 410 sh $T/script
408fi 411fi
409 412
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE01 b/tools/testing/selftests/rcutorture/configs/rcu/TREE01
index 9c827ec59a97..063b7079c621 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE01
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE01
@@ -15,7 +15,6 @@ CONFIG_RCU_FANOUT_EXACT=n
15CONFIG_RCU_NOCB_CPU=y 15CONFIG_RCU_NOCB_CPU=y
16CONFIG_RCU_NOCB_CPU_ZERO=y 16CONFIG_RCU_NOCB_CPU_ZERO=y
17CONFIG_DEBUG_LOCK_ALLOC=n 17CONFIG_DEBUG_LOCK_ALLOC=n
18CONFIG_PROVE_RCU_DELAY=n
19CONFIG_RCU_CPU_STALL_INFO=n 18CONFIG_RCU_CPU_STALL_INFO=n
20CONFIG_RCU_CPU_STALL_VERBOSE=n 19CONFIG_RCU_CPU_STALL_VERBOSE=n
21CONFIG_RCU_BOOST=n 20CONFIG_RCU_BOOST=n
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE02 b/tools/testing/selftests/rcutorture/configs/rcu/TREE02
index 1a777b5f68b5..ea119ba2f7d4 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE02
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE02
@@ -18,7 +18,6 @@ CONFIG_RCU_FANOUT_EXACT=n
18CONFIG_RCU_NOCB_CPU=n 18CONFIG_RCU_NOCB_CPU=n
19CONFIG_DEBUG_LOCK_ALLOC=y 19CONFIG_DEBUG_LOCK_ALLOC=y
20CONFIG_PROVE_LOCKING=n 20CONFIG_PROVE_LOCKING=n
21CONFIG_PROVE_RCU_DELAY=n
22CONFIG_RCU_CPU_STALL_INFO=n 21CONFIG_RCU_CPU_STALL_INFO=n
23CONFIG_RCU_CPU_STALL_VERBOSE=y 22CONFIG_RCU_CPU_STALL_VERBOSE=y
24CONFIG_RCU_BOOST=n 23CONFIG_RCU_BOOST=n
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE02-T b/tools/testing/selftests/rcutorture/configs/rcu/TREE02-T
index 61c8d9ce5bb2..19cf9485f48a 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE02-T
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE02-T
@@ -18,7 +18,6 @@ CONFIG_RCU_FANOUT_EXACT=n
18CONFIG_RCU_NOCB_CPU=n 18CONFIG_RCU_NOCB_CPU=n
19CONFIG_DEBUG_LOCK_ALLOC=y 19CONFIG_DEBUG_LOCK_ALLOC=y
20CONFIG_PROVE_LOCKING=n 20CONFIG_PROVE_LOCKING=n
21CONFIG_PROVE_RCU_DELAY=n
22CONFIG_RCU_CPU_STALL_INFO=n 21CONFIG_RCU_CPU_STALL_INFO=n
23CONFIG_RCU_CPU_STALL_VERBOSE=y 22CONFIG_RCU_CPU_STALL_VERBOSE=y
24CONFIG_RCU_BOOST=n 23CONFIG_RCU_BOOST=n
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE03 b/tools/testing/selftests/rcutorture/configs/rcu/TREE03
index c1f111c1561b..f4567fb3e332 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE03
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE03
@@ -14,7 +14,6 @@ CONFIG_RCU_FANOUT_LEAF=4
14CONFIG_RCU_FANOUT_EXACT=n 14CONFIG_RCU_FANOUT_EXACT=n
15CONFIG_RCU_NOCB_CPU=n 15CONFIG_RCU_NOCB_CPU=n
16CONFIG_DEBUG_LOCK_ALLOC=n 16CONFIG_DEBUG_LOCK_ALLOC=n
17CONFIG_PROVE_RCU_DELAY=n
18CONFIG_RCU_CPU_STALL_INFO=n 17CONFIG_RCU_CPU_STALL_INFO=n
19CONFIG_RCU_CPU_STALL_VERBOSE=n 18CONFIG_RCU_CPU_STALL_VERBOSE=n
20CONFIG_RCU_BOOST=y 19CONFIG_RCU_BOOST=y
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE04 b/tools/testing/selftests/rcutorture/configs/rcu/TREE04
index 7dbd27ce17a4..0a262fbb0c12 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE04
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE04
@@ -18,7 +18,6 @@ CONFIG_RCU_FANOUT_LEAF=2
18CONFIG_RCU_FANOUT_EXACT=n 18CONFIG_RCU_FANOUT_EXACT=n
19CONFIG_RCU_NOCB_CPU=n 19CONFIG_RCU_NOCB_CPU=n
20CONFIG_DEBUG_LOCK_ALLOC=n 20CONFIG_DEBUG_LOCK_ALLOC=n
21CONFIG_PROVE_RCU_DELAY=n
22CONFIG_RCU_CPU_STALL_INFO=y 21CONFIG_RCU_CPU_STALL_INFO=y
23CONFIG_RCU_CPU_STALL_VERBOSE=y 22CONFIG_RCU_CPU_STALL_VERBOSE=y
24CONFIG_DEBUG_OBJECTS_RCU_HEAD=n 23CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE05 b/tools/testing/selftests/rcutorture/configs/rcu/TREE05
index d0f32e574743..3a06b97e9a73 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE05
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE05
@@ -18,7 +18,6 @@ CONFIG_RCU_NOCB_CPU_NONE=y
18CONFIG_DEBUG_LOCK_ALLOC=y 18CONFIG_DEBUG_LOCK_ALLOC=y
19CONFIG_PROVE_LOCKING=y 19CONFIG_PROVE_LOCKING=y
20CONFIG_PROVE_RCU=y 20CONFIG_PROVE_RCU=y
21CONFIG_PROVE_RCU_DELAY=y
22CONFIG_RCU_CPU_STALL_INFO=n 21CONFIG_RCU_CPU_STALL_INFO=n
23CONFIG_RCU_CPU_STALL_VERBOSE=n 22CONFIG_RCU_CPU_STALL_VERBOSE=n
24CONFIG_DEBUG_OBJECTS_RCU_HEAD=n 23CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE06 b/tools/testing/selftests/rcutorture/configs/rcu/TREE06
index 2e477dfb9c57..8f084cca91bf 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE06
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE06
@@ -19,7 +19,6 @@ CONFIG_RCU_NOCB_CPU=n
19CONFIG_DEBUG_LOCK_ALLOC=y 19CONFIG_DEBUG_LOCK_ALLOC=y
20CONFIG_PROVE_LOCKING=y 20CONFIG_PROVE_LOCKING=y
21CONFIG_PROVE_RCU=y 21CONFIG_PROVE_RCU=y
22CONFIG_PROVE_RCU_DELAY=n
23CONFIG_RCU_CPU_STALL_INFO=n 22CONFIG_RCU_CPU_STALL_INFO=n
24CONFIG_RCU_CPU_STALL_VERBOSE=n 23CONFIG_RCU_CPU_STALL_VERBOSE=n
25CONFIG_DEBUG_OBJECTS_RCU_HEAD=y 24CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE07 b/tools/testing/selftests/rcutorture/configs/rcu/TREE07
index 042f86ef362a..ab6225506909 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE07
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE07
@@ -17,7 +17,6 @@ CONFIG_RCU_FANOUT_LEAF=2
17CONFIG_RCU_FANOUT_EXACT=n 17CONFIG_RCU_FANOUT_EXACT=n
18CONFIG_RCU_NOCB_CPU=n 18CONFIG_RCU_NOCB_CPU=n
19CONFIG_DEBUG_LOCK_ALLOC=n 19CONFIG_DEBUG_LOCK_ALLOC=n
20CONFIG_PROVE_RCU_DELAY=n
21CONFIG_RCU_CPU_STALL_INFO=y 20CONFIG_RCU_CPU_STALL_INFO=y
22CONFIG_RCU_CPU_STALL_VERBOSE=n 21CONFIG_RCU_CPU_STALL_VERBOSE=n
23CONFIG_DEBUG_OBJECTS_RCU_HEAD=n 22CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE08 b/tools/testing/selftests/rcutorture/configs/rcu/TREE08
index 3438cee1e3c5..69a2e255bf98 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE08
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE08
@@ -18,7 +18,6 @@ CONFIG_RCU_FANOUT_LEAF=2
18CONFIG_RCU_NOCB_CPU=y 18CONFIG_RCU_NOCB_CPU=y
19CONFIG_RCU_NOCB_CPU_ALL=y 19CONFIG_RCU_NOCB_CPU_ALL=y
20CONFIG_DEBUG_LOCK_ALLOC=n 20CONFIG_DEBUG_LOCK_ALLOC=n
21CONFIG_PROVE_RCU_DELAY=n
22CONFIG_RCU_CPU_STALL_INFO=n 21CONFIG_RCU_CPU_STALL_INFO=n
23CONFIG_RCU_CPU_STALL_VERBOSE=n 22CONFIG_RCU_CPU_STALL_VERBOSE=n
24CONFIG_RCU_BOOST=n 23CONFIG_RCU_BOOST=n
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE08-T b/tools/testing/selftests/rcutorture/configs/rcu/TREE08-T
index bf4523d3e44c..a0f32fb8f17e 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE08-T
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE08-T
@@ -18,7 +18,6 @@ CONFIG_RCU_FANOUT_LEAF=2
18CONFIG_RCU_NOCB_CPU=y 18CONFIG_RCU_NOCB_CPU=y
19CONFIG_RCU_NOCB_CPU_ALL=y 19CONFIG_RCU_NOCB_CPU_ALL=y
20CONFIG_DEBUG_LOCK_ALLOC=n 20CONFIG_DEBUG_LOCK_ALLOC=n
21CONFIG_PROVE_RCU_DELAY=n
22CONFIG_RCU_CPU_STALL_INFO=n 21CONFIG_RCU_CPU_STALL_INFO=n
23CONFIG_RCU_CPU_STALL_VERBOSE=n 22CONFIG_RCU_CPU_STALL_VERBOSE=n
24CONFIG_RCU_BOOST=n 23CONFIG_RCU_BOOST=n
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE09 b/tools/testing/selftests/rcutorture/configs/rcu/TREE09
index 81e4f7c0bf0b..b7a62a540ad1 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE09
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE09
@@ -13,7 +13,6 @@ CONFIG_SUSPEND=n
13CONFIG_HIBERNATION=n 13CONFIG_HIBERNATION=n
14CONFIG_RCU_NOCB_CPU=n 14CONFIG_RCU_NOCB_CPU=n
15CONFIG_DEBUG_LOCK_ALLOC=n 15CONFIG_DEBUG_LOCK_ALLOC=n
16CONFIG_PROVE_RCU_DELAY=n
17CONFIG_RCU_CPU_STALL_INFO=n 16CONFIG_RCU_CPU_STALL_INFO=n
18CONFIG_RCU_CPU_STALL_VERBOSE=n 17CONFIG_RCU_CPU_STALL_VERBOSE=n
19CONFIG_RCU_BOOST=n 18CONFIG_RCU_BOOST=n
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v0.0/P5-U-T-NH-sd-SMP-hp b/tools/testing/selftests/rcutorture/configs/rcu/v0.0/P5-U-T-NH-sd-SMP-hp
index ef624ce73d8e..a55c00877fe4 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/v0.0/P5-U-T-NH-sd-SMP-hp
+++ b/tools/testing/selftests/rcutorture/configs/rcu/v0.0/P5-U-T-NH-sd-SMP-hp
@@ -13,7 +13,6 @@ CONFIG_PREEMPT_VOLUNTARY=n
13CONFIG_PREEMPT=y 13CONFIG_PREEMPT=y
14#CHECK#CONFIG_TREE_PREEMPT_RCU=y 14#CHECK#CONFIG_TREE_PREEMPT_RCU=y
15CONFIG_DEBUG_KERNEL=y 15CONFIG_DEBUG_KERNEL=y
16CONFIG_PROVE_RCU_DELAY=y
17CONFIG_DEBUG_OBJECTS=y 16CONFIG_DEBUG_OBJECTS=y
18CONFIG_DEBUG_OBJECTS_RCU_HEAD=y 17CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
19CONFIG_RT_MUTEXES=y 18CONFIG_RT_MUTEXES=y
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v3.12/P5-U-T-NH-sd-SMP-hp b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/P5-U-T-NH-sd-SMP-hp
index ef624ce73d8e..a55c00877fe4 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/v3.12/P5-U-T-NH-sd-SMP-hp
+++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/P5-U-T-NH-sd-SMP-hp
@@ -13,7 +13,6 @@ CONFIG_PREEMPT_VOLUNTARY=n
13CONFIG_PREEMPT=y 13CONFIG_PREEMPT=y
14#CHECK#CONFIG_TREE_PREEMPT_RCU=y 14#CHECK#CONFIG_TREE_PREEMPT_RCU=y
15CONFIG_DEBUG_KERNEL=y 15CONFIG_DEBUG_KERNEL=y
16CONFIG_PROVE_RCU_DELAY=y
17CONFIG_DEBUG_OBJECTS=y 16CONFIG_DEBUG_OBJECTS=y
18CONFIG_DEBUG_OBJECTS_RCU_HEAD=y 17CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
19CONFIG_RT_MUTEXES=y 18CONFIG_RT_MUTEXES=y
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v3.3/P5-U-T-NH-sd-SMP-hp b/tools/testing/selftests/rcutorture/configs/rcu/v3.3/P5-U-T-NH-sd-SMP-hp
index ef624ce73d8e..a55c00877fe4 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/v3.3/P5-U-T-NH-sd-SMP-hp
+++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.3/P5-U-T-NH-sd-SMP-hp
@@ -13,7 +13,6 @@ CONFIG_PREEMPT_VOLUNTARY=n
13CONFIG_PREEMPT=y 13CONFIG_PREEMPT=y
14#CHECK#CONFIG_TREE_PREEMPT_RCU=y 14#CHECK#CONFIG_TREE_PREEMPT_RCU=y
15CONFIG_DEBUG_KERNEL=y 15CONFIG_DEBUG_KERNEL=y
16CONFIG_PROVE_RCU_DELAY=y
17CONFIG_DEBUG_OBJECTS=y 16CONFIG_DEBUG_OBJECTS=y
18CONFIG_DEBUG_OBJECTS_RCU_HEAD=y 17CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
19CONFIG_RT_MUTEXES=y 18CONFIG_RT_MUTEXES=y
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v3.5/P5-U-T-NH-sd-SMP-hp b/tools/testing/selftests/rcutorture/configs/rcu/v3.5/P5-U-T-NH-sd-SMP-hp
index ef624ce73d8e..a55c00877fe4 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/v3.5/P5-U-T-NH-sd-SMP-hp
+++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.5/P5-U-T-NH-sd-SMP-hp
@@ -13,7 +13,6 @@ CONFIG_PREEMPT_VOLUNTARY=n
13CONFIG_PREEMPT=y 13CONFIG_PREEMPT=y
14#CHECK#CONFIG_TREE_PREEMPT_RCU=y 14#CHECK#CONFIG_TREE_PREEMPT_RCU=y
15CONFIG_DEBUG_KERNEL=y 15CONFIG_DEBUG_KERNEL=y
16CONFIG_PROVE_RCU_DELAY=y
17CONFIG_DEBUG_OBJECTS=y 16CONFIG_DEBUG_OBJECTS=y
18CONFIG_DEBUG_OBJECTS_RCU_HEAD=y 17CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
19CONFIG_RT_MUTEXES=y 18CONFIG_RT_MUTEXES=y
diff --git a/tools/testing/selftests/rcutorture/doc/TREE_RCU-kconfig.txt b/tools/testing/selftests/rcutorture/doc/TREE_RCU-kconfig.txt
index adbb76cffb49..3e588db86a17 100644
--- a/tools/testing/selftests/rcutorture/doc/TREE_RCU-kconfig.txt
+++ b/tools/testing/selftests/rcutorture/doc/TREE_RCU-kconfig.txt
@@ -14,7 +14,6 @@ CONFIG_NO_HZ_FULL_SYSIDLE -- Do one.
14CONFIG_PREEMPT -- Do half. (First three and #8.) 14CONFIG_PREEMPT -- Do half. (First three and #8.)
15CONFIG_PROVE_LOCKING -- Do all but two, covering CONFIG_PROVE_RCU and not. 15CONFIG_PROVE_LOCKING -- Do all but two, covering CONFIG_PROVE_RCU and not.
16CONFIG_PROVE_RCU -- Do all but one under CONFIG_PROVE_LOCKING. 16CONFIG_PROVE_RCU -- Do all but one under CONFIG_PROVE_LOCKING.
17CONFIG_PROVE_RCU_DELAY -- Do one.
18CONFIG_RCU_BOOST -- one of TREE_PREEMPT_RCU. 17CONFIG_RCU_BOOST -- one of TREE_PREEMPT_RCU.
19CONFIG_RCU_BOOST_PRIO -- set to 2 for _BOOST testing. 18CONFIG_RCU_BOOST_PRIO -- set to 2 for _BOOST testing.
20CONFIG_RCU_CPU_STALL_INFO -- do one with and without _VERBOSE. 19CONFIG_RCU_CPU_STALL_INFO -- do one with and without _VERBOSE.