author	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2014-03-18 18:57:41 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2014-05-14 12:46:21 -0400
commit	a48f3fad4f97fe6a2522fe2f5b3054b4c48a8eac (patch)
tree	3a335d65b9c58e11dbbe02e121186adb26648a3a /kernel
parent	e8d07a4ebc5c314d79df44310e0d81057278a310 (diff)
rcutorture: Add tests for get_state_synchronize_rcu()
This commit adds rcutorture testing for get_state_synchronize_rcu() and
cond_synchronize_rcu().

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/rcu/rcutorture.c	| 130
1 file changed, 95 insertions(+), 35 deletions(-)
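For context, the two primitives exercised by the new RTWS_COND_GET/RTWS_COND_SYNC
path follow a snapshot-then-maybe-wait pattern: take a grace-period cookie, do other
work, then block only if a full grace period has not already elapsed since the
snapshot. A minimal sketch of that pattern follows; the caller and the
do_other_updates() helper are hypothetical and are not part of this patch.

	/*
	 * Hypothetical caller, for illustration only (not part of this patch).
	 * get_state_synchronize_rcu() snapshots the current grace-period state;
	 * cond_synchronize_rcu() later waits only if a full grace period has
	 * not already elapsed since that snapshot.
	 */
	static void example_cond_gp_wait(void)
	{
		unsigned long gp_snap;

		gp_snap = get_state_synchronize_rcu();	/* Take the snapshot. */
		do_other_updates();			/* Hypothetical work that takes time. */
		cond_synchronize_rcu(gp_snap);		/* Wait only if still needed. */
	}
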
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index dfec2582899f..0d27b9cc14e4 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -58,6 +58,7 @@ torture_param(int, fqs_duration, 0,
58 "Duration of fqs bursts (us), 0 to disable"); 58 "Duration of fqs bursts (us), 0 to disable");
59torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)"); 59torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
60torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)"); 60torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
61torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
61torture_param(bool, gp_exp, false, "Use expedited GP wait primitives"); 62torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
62torture_param(bool, gp_normal, false, 63torture_param(bool, gp_normal, false,
63 "Use normal (non-expedited) GP wait primitives"); 64 "Use normal (non-expedited) GP wait primitives");
@@ -144,8 +145,10 @@ static int rcu_torture_writer_state;
 #define RTWS_REPLACE		2
 #define RTWS_DEF_FREE		3
 #define RTWS_EXP_SYNC		4
-#define RTWS_STUTTER		5
-#define RTWS_STOPPING		6
+#define RTWS_COND_GET		5
+#define RTWS_COND_SYNC		6
+#define RTWS_STUTTER		7
+#define RTWS_STOPPING		8
 
 #if defined(MODULE) || defined(CONFIG_RCU_TORTURE_TEST_RUNNABLE)
 #define RCUTORTURE_RUNNABLE_INIT 1
@@ -232,6 +235,8 @@ struct rcu_torture_ops {
 	void (*deferred_free)(struct rcu_torture *p);
 	void (*sync)(void);
 	void (*exp_sync)(void);
+	unsigned long (*get_state)(void);
+	void (*cond_sync)(unsigned long oldstate);
 	void (*call)(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
 	void (*cb_barrier)(void);
 	void (*fqs)(void);
@@ -283,10 +288,48 @@ static int rcu_torture_completed(void)
 	return rcu_batches_completed();
 }
 
+/*
+ * Update callback in the pipe. This should be invoked after a grace period.
+ */
+static bool
+rcu_torture_pipe_update_one(struct rcu_torture *rp)
+{
+	int i;
+
+	i = rp->rtort_pipe_count;
+	if (i > RCU_TORTURE_PIPE_LEN)
+		i = RCU_TORTURE_PIPE_LEN;
+	atomic_inc(&rcu_torture_wcount[i]);
+	if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
+		rp->rtort_mbtest = 0;
+		return true;
+	}
+	return false;
+}
+
+/*
+ * Update all callbacks in the pipe. Suitable for synchronous grace-period
+ * primitives.
+ */
+static void
+rcu_torture_pipe_update(struct rcu_torture *old_rp)
+{
+	struct rcu_torture *rp;
+	struct rcu_torture *rp1;
+
+	if (old_rp)
+		list_add(&old_rp->rtort_free, &rcu_torture_removed);
+	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
+		if (rcu_torture_pipe_update_one(rp)) {
+			list_del(&rp->rtort_free);
+			rcu_torture_free(rp);
+		}
+	}
+}
+
 static void
 rcu_torture_cb(struct rcu_head *p)
 {
-	int i;
 	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);
 
 	if (torture_must_stop_irq()) {
@@ -294,16 +337,10 @@ rcu_torture_cb(struct rcu_head *p)
 		/* The next initialization will pick up the pieces. */
 		return;
 	}
-	i = rp->rtort_pipe_count;
-	if (i > RCU_TORTURE_PIPE_LEN)
-		i = RCU_TORTURE_PIPE_LEN;
-	atomic_inc(&rcu_torture_wcount[i]);
-	if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
-		rp->rtort_mbtest = 0;
+	if (rcu_torture_pipe_update_one(rp))
 		rcu_torture_free(rp);
-	} else {
+	else
 		cur_ops->deferred_free(rp);
-	}
 }
 
 static int rcu_no_completed(void)
@@ -331,6 +368,8 @@ static struct rcu_torture_ops rcu_ops = {
 	.deferred_free	= rcu_torture_deferred_free,
 	.sync		= synchronize_rcu,
 	.exp_sync	= synchronize_rcu_expedited,
+	.get_state	= get_state_synchronize_rcu,
+	.cond_sync	= cond_synchronize_rcu,
 	.call		= call_rcu,
 	.cb_barrier	= rcu_barrier,
 	.fqs		= rcu_force_quiescent_state,
@@ -705,16 +744,39 @@ rcu_torture_fqs(void *arg)
 static int
 rcu_torture_writer(void *arg)
 {
-	bool exp;
+	unsigned long gp_snap;
+	bool gp_cond1 = gp_cond, gp_exp1 = gp_exp, gp_normal1 = gp_normal;
 	int i;
 	struct rcu_torture *rp;
-	struct rcu_torture *rp1;
 	struct rcu_torture *old_rp;
 	static DEFINE_TORTURE_RANDOM(rand);
+	int synctype[] = { RTWS_DEF_FREE, RTWS_EXP_SYNC, RTWS_COND_GET };
+	int nsynctypes = 0;
 
 	VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
 	set_user_nice(current, MAX_NICE);
 
+	/* Initialize synctype[] array. If none set, take default. */
+	if (!gp_cond1 && !gp_exp1 && !gp_normal1)
+		gp_cond1 = gp_exp1 = gp_normal1 = true;
+	if (gp_cond1 && cur_ops->get_state && cur_ops->cond_sync)
+		synctype[nsynctypes++] = RTWS_COND_GET;
+	else if (gp_cond && (!cur_ops->get_state || !cur_ops->cond_sync))
+		pr_alert("rcu_torture_writer: gp_cond without primitives.\n");
+	if (gp_exp1 && cur_ops->exp_sync)
+		synctype[nsynctypes++] = RTWS_EXP_SYNC;
+	else if (gp_exp && !cur_ops->exp_sync)
+		pr_alert("rcu_torture_writer: gp_exp without primitives.\n");
+	if (gp_normal1 && cur_ops->deferred_free)
+		synctype[nsynctypes++] = RTWS_DEF_FREE;
+	else if (gp_normal && !cur_ops->deferred_free)
+		pr_alert("rcu_torture_writer: gp_normal without primitives.\n");
+	if (WARN_ONCE(nsynctypes == 0,
+		      "rcu_torture_writer: No update-side primitives.\n")) {
+		rcu_torture_writer_state = RTWS_STOPPING;
+		torture_kthread_stopping("rcu_torture_writer");
+	}
+
 	do {
 		rcu_torture_writer_state = RTWS_FIXED_DELAY;
 		schedule_timeout_uninterruptible(1);
@@ -736,32 +798,30 @@ rcu_torture_writer(void *arg)
 			i = RCU_TORTURE_PIPE_LEN;
 		atomic_inc(&rcu_torture_wcount[i]);
 		old_rp->rtort_pipe_count++;
-		if (gp_normal == gp_exp)
-			exp = !!(torture_random(&rand) & 0x80);
-		else
-			exp = gp_exp;
-		if (!exp) {
+		switch (synctype[torture_random(&rand) % nsynctypes]) {
+		case RTWS_DEF_FREE:
 			rcu_torture_writer_state = RTWS_DEF_FREE;
 			cur_ops->deferred_free(old_rp);
-		} else {
+			break;
+		case RTWS_EXP_SYNC:
 			rcu_torture_writer_state = RTWS_EXP_SYNC;
 			cur_ops->exp_sync();
-			list_add(&old_rp->rtort_free,
-				 &rcu_torture_removed);
-			list_for_each_entry_safe(rp, rp1,
-						 &rcu_torture_removed,
-						 rtort_free) {
-				i = rp->rtort_pipe_count;
-				if (i > RCU_TORTURE_PIPE_LEN)
-					i = RCU_TORTURE_PIPE_LEN;
-				atomic_inc(&rcu_torture_wcount[i]);
-				if (++rp->rtort_pipe_count >=
-				    RCU_TORTURE_PIPE_LEN) {
-					rp->rtort_mbtest = 0;
-					list_del(&rp->rtort_free);
-					rcu_torture_free(rp);
-				}
-			}
-		}
+			rcu_torture_pipe_update(old_rp);
+			break;
+		case RTWS_COND_GET:
+			rcu_torture_writer_state = RTWS_COND_GET;
+			gp_snap = cur_ops->get_state();
+			i = torture_random(&rand) % 16;
+			if (i != 0)
+				schedule_timeout_interruptible(i);
+			udelay(torture_random(&rand) % 1000);
+			rcu_torture_writer_state = RTWS_COND_SYNC;
+			cur_ops->cond_sync(gp_snap);
+			rcu_torture_pipe_update(old_rp);
+			break;
+		default:
+			WARN_ON_ONCE(1);
+			break;
+		}
 		rcutorture_record_progress(++rcu_torture_current_version);