author    Paul E. McKenney <paulmck@linux.vnet.ibm.com>    2015-03-10 21:33:20 -0400
committer Paul E. McKenney <paulmck@linux.vnet.ibm.com>    2015-05-27 15:59:02 -0400
commit    0f41c0ddadfb3d5baffe62351c380e2881aacd58 (patch)
tree      d0e4b7585f3016f68b6f9afd23342c4dba7e8f93 /kernel/rcu
parent    3eaaaf6cd6d7fbaf552cc543ccb93d7da81f43ec (diff)
rcu: Provide diagnostic option to slow down grace-period scans
Grace-period scans of the rcu_node combining tree normally proceed quite quickly, so it is very difficult to reproduce races against them. This commit therefore allows grace-period pre-initialization and cleanup to be artificially slowed down, increasing race-reproduction probability.

Two pairs of new Kconfig parameters are provided: RCU_TORTURE_TEST_SLOW_PREINIT enables slowing down the propagation of CPU-hotplug changes up the combining tree, with RCU_TORTURE_TEST_SLOW_PREINIT_DELAY specifying the delay in jiffies; and RCU_TORTURE_TEST_SLOW_CLEANUP enables slowing down the end-of-grace-period cleanup scan, with RCU_TORTURE_TEST_SLOW_CLEANUP_DELAY specifying the delay in jiffies. Boot-time parameters named rcutree.gp_preinit_delay and rcutree.gp_cleanup_delay allow these delays to be specified at boot time.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
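As a usage sketch (the option and parameter names come from the commit message above; the numeric delay values are illustrative assumptions, not defaults taken from the patch), a debug .config might enable both slowdowns like this:

    CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT=y
    CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT_DELAY=3
    CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP=y
    CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP_DELAY=3

and the compiled-in delays can then be overridden on the kernel command line at boot:

    rcutree.gp_preinit_delay=3 rcutree.gp_cleanup_delay=3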
Diffstat (limited to 'kernel/rcu')
-rw-r--r--    kernel/rcu/tree.c    29
1 file changed, 25 insertions(+), 4 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 9b076b284695..2f3cb5513ca3 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -163,6 +163,14 @@ static int kthread_prio = CONFIG_RCU_KTHREAD_PRIO;
 module_param(kthread_prio, int, 0644);
 
 /* Delay in jiffies for grace-period initialization delays, debug only. */
+
+#ifdef CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT
+static int gp_preinit_delay = CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT_DELAY;
+module_param(gp_preinit_delay, int, 0644);
+#else /* #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT */
+static const int gp_preinit_delay;
+#endif /* #else #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT */
+
 #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_INIT
 static int gp_init_delay = CONFIG_RCU_TORTURE_TEST_SLOW_INIT_DELAY;
 module_param(gp_init_delay, int, 0644);
@@ -170,6 +178,13 @@ module_param(gp_init_delay, int, 0644);
 static const int gp_init_delay;
 #endif /* #else #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_INIT */
 
+#ifdef CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP
+static int gp_cleanup_delay = CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP_DELAY;
+module_param(gp_cleanup_delay, int, 0644);
+#else /* #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP */
+static const int gp_cleanup_delay;
+#endif /* #else #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP */
+
 /*
  * Number of grace periods between delays, normalized by the duration of
  * the delay.  The longer the delay, the more the grace periods between
@@ -1742,6 +1757,13 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
 	rcu_gp_kthread_wake(rsp);
 }
 
+static void rcu_gp_slow(struct rcu_state *rsp, int delay)
+{
+	if (delay > 0 &&
+	    !(rsp->gpnum % (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
+		schedule_timeout_uninterruptible(delay);
+}
+
 /*
  * Initialize a new grace period.  Return 0 if no grace period required.
  */
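The gating expression in rcu_gp_slow() above normalizes delay frequency by delay length: a sleep of "delay" jiffies fires only on grace periods whose number is a multiple of rcu_num_nodes * PER_RCU_NODE_PERIOD * delay, so the long-run slowdown is roughly constant for any nonzero delay. A minimal user-space sketch of that arithmetic follows; the values of rcu_num_nodes and PER_RCU_NODE_PERIOD here are assumptions for illustration, since in tree.c they come from the combining-tree geometry and a macro, respectively.

    /* Standalone sketch (not kernel code) of rcu_gp_slow()'s gating logic. */
    #include <stdio.h>

    #define PER_RCU_NODE_PERIOD 3	/* assumed value of the tree.c macro */

    int main(void)
    {
    	int rcu_num_nodes = 4;	/* hypothetical combining-tree size */
    	int delay = 5;		/* e.g., rcutree.gp_init_delay=5 jiffies */
    	unsigned long gpnum;

    	/* A delay of "delay" jiffies fires once per
    	 * rcu_num_nodes * PER_RCU_NODE_PERIOD * delay grace periods,
    	 * so doubling "delay" halves how often it fires. */
    	for (gpnum = 0; gpnum < 200; gpnum++)
    		if (delay > 0 &&
    		    !(gpnum % (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
    			printf("gp %lu: sleep %d jiffies\n", gpnum, delay);
    	return 0;
    }

With these assumed values the sleep fires on grace periods 0, 60, 120, and 180, averaging 1/(rcu_num_nodes * PER_RCU_NODE_PERIOD) jiffies of added delay per grace period per call site regardless of the configured delay.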
@@ -1784,6 +1806,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
 	 * will handle subsequent offline CPUs.
 	 */
 	rcu_for_each_leaf_node(rsp, rnp) {
+		rcu_gp_slow(rsp, gp_preinit_delay);
 		raw_spin_lock_irq(&rnp->lock);
 		smp_mb__after_unlock_lock();
 		if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
@@ -1840,6 +1863,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
 	 * process finishes, because this kthread handles both.
 	 */
 	rcu_for_each_node_breadth_first(rsp, rnp) {
+		rcu_gp_slow(rsp, gp_init_delay);
 		raw_spin_lock_irq(&rnp->lock);
 		smp_mb__after_unlock_lock();
 		rdp = this_cpu_ptr(rsp->rda);
@@ -1857,10 +1881,6 @@ static int rcu_gp_init(struct rcu_state *rsp)
 		raw_spin_unlock_irq(&rnp->lock);
 		cond_resched_rcu_qs();
 		WRITE_ONCE(rsp->gp_activity, jiffies);
-		if (gp_init_delay > 0 &&
-		    !(rsp->gpnum %
-		      (rcu_num_nodes * PER_RCU_NODE_PERIOD * gp_init_delay)))
-			schedule_timeout_uninterruptible(gp_init_delay);
 	}
 
 	return 1;
@@ -1955,6 +1975,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 		raw_spin_unlock_irq(&rnp->lock);
 		cond_resched_rcu_qs();
 		WRITE_ONCE(rsp->gp_activity, jiffies);
+		rcu_gp_slow(rsp, gp_cleanup_delay);
 	}
 	rnp = rcu_get_root(rsp);
 	raw_spin_lock_irq(&rnp->lock);