author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>   2009-06-25 12:08:17 -0400
committer  Ingo Molnar <mingo@elte.hu>                     2009-07-03 04:02:29 -0400
commit     0acc512cb1a29636df5e982c7d845edafe77c2d0
tree       0760cc56d3fdf6f2f72e2bfa92b811bedaf6a6d5
parent     03b042bf1dc14a268a3d65d38b4ec2a4261e8477
rcu: Add synchronize_sched_expedited() torture tests
This patch adds rcutorture tests for the new
synchronize_sched_expedited() primitive, and also does some
whitespace cleanups in kernel/rcutorture.c.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: akpm@linux-foundation.org
Cc: torvalds@linux-foundation.org
Cc: davem@davemloft.net
Cc: dada1@cosmosbay.com
Cc: zbr@ioremap.net
Cc: jeff.chua.linux@gmail.com
Cc: paulus@samba.org
Cc: laijs@cn.fujitsu.com
Cc: jengelh@medozas.de
Cc: r000n@r000n.net
Cc: benh@kernel.crashing.org
Cc: mathieu.desnoyers@polymtl.ca
LKML-Reference: <12459460981342-git-send-email->
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--  kernel/rcutorture.c  202
1 file changed, 110 insertions(+), 92 deletions(-)
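The patch plugs a new flavor into rcutorture's table of per-flavor operations. As a rough, userspace-compilable illustration of that pattern (the stub bodies and the main() driver below are invented for this sketch; only the struct shape and the select-by-name loop mirror kernel/rcutorture.c), a flavor is described by a struct rcu_torture_ops and chosen by name via the torture_type module parameter:

/*
 * Simplified sketch of the rcutorture ops-table pattern this patch
 * extends.  The function bodies are stubs for illustration only.
 */
#include <stdio.h>
#include <string.h>

struct rcu_torture_ops {
        int (*readlock)(void);
        void (*readunlock)(int idx);
        void (*sync)(void);     /* grace-period wait, e.g. synchronize_sched_expedited() */
        int irq_capable;
        char *name;
};

/* Stand-ins for the real kernel primitives. */
static int stub_read_lock(void) { return 0; }
static void stub_read_unlock(int idx) { (void)idx; }
static void stub_sync_expedited(void) { puts("expedited grace period"); }

static struct rcu_torture_ops sched_expedited_ops = {
        .readlock    = stub_read_lock,
        .readunlock  = stub_read_unlock,
        .sync        = stub_sync_expedited,
        .irq_capable = 1,
        .name        = "sched_expedited",
};

static struct rcu_torture_ops *torture_ops[] = { &sched_expedited_ops };
static struct rcu_torture_ops *cur_ops;

int main(void)
{
        const char *torture_type = "sched_expedited";  /* module parameter in the kernel */
        size_t i;

        /* Pick the flavor whose ->name matches torture_type. */
        for (i = 0; i < sizeof(torture_ops) / sizeof(torture_ops[0]); i++) {
                cur_ops = torture_ops[i];
                if (strcmp(torture_type, cur_ops->name) == 0)
                        break;
        }
        if (i == sizeof(torture_ops) / sizeof(torture_ops[0])) {
                fprintf(stderr, "unknown torture_type %s\n", torture_type);
                return 1;
        }

        /* A reader pass brackets accesses with readlock/readunlock;
         * a writer pass waits for a grace period via cur_ops->sync(). */
        int idx = cur_ops->readlock();
        cur_ops->readunlock(idx);
        cur_ops->sync();
        return 0;
}

In the kernel the same selection happens in rcu_torture_init(), which is why the final hunk below adds &sched_expedited_ops to torture_ops[].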
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 9b4a975a4b4a..b33db539a8ad 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -257,14 +257,14 @@ struct rcu_torture_ops {
         void (*init)(void);
         void (*cleanup)(void);
         int (*readlock)(void);
-        void (*readdelay)(struct rcu_random_state *rrsp);
+        void (*read_delay)(struct rcu_random_state *rrsp);
         void (*readunlock)(int idx);
         int (*completed)(void);
-        void (*deferredfree)(struct rcu_torture *p);
+        void (*deferred_free)(struct rcu_torture *p);
         void (*sync)(void);
         void (*cb_barrier)(void);
         int (*stats)(char *page);
-        int irqcapable;
+        int irq_capable;
         char *name;
 };
 static struct rcu_torture_ops *cur_ops = NULL;
@@ -320,7 +320,7 @@ rcu_torture_cb(struct rcu_head *p)
                 rp->rtort_mbtest = 0;
                 rcu_torture_free(rp);
         } else
-                cur_ops->deferredfree(rp);
+                cur_ops->deferred_free(rp);
 }
 
 static void rcu_torture_deferred_free(struct rcu_torture *p)
@@ -329,18 +329,18 @@ static void rcu_torture_deferred_free(struct rcu_torture *p)
 }
 
 static struct rcu_torture_ops rcu_ops = {
         .init = NULL,
         .cleanup = NULL,
         .readlock = rcu_torture_read_lock,
-        .readdelay = rcu_read_delay,
+        .read_delay = rcu_read_delay,
         .readunlock = rcu_torture_read_unlock,
         .completed = rcu_torture_completed,
-        .deferredfree = rcu_torture_deferred_free,
+        .deferred_free = rcu_torture_deferred_free,
         .sync = synchronize_rcu,
         .cb_barrier = rcu_barrier,
         .stats = NULL,
-        .irqcapable = 1,
+        .irq_capable = 1,
         .name = "rcu"
 };
 
 static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
@@ -370,18 +370,18 @@ static void rcu_sync_torture_init(void)
 }
 
 static struct rcu_torture_ops rcu_sync_ops = {
         .init = rcu_sync_torture_init,
         .cleanup = NULL,
         .readlock = rcu_torture_read_lock,
-        .readdelay = rcu_read_delay,
+        .read_delay = rcu_read_delay,
         .readunlock = rcu_torture_read_unlock,
         .completed = rcu_torture_completed,
-        .deferredfree = rcu_sync_torture_deferred_free,
+        .deferred_free = rcu_sync_torture_deferred_free,
         .sync = synchronize_rcu,
         .cb_barrier = NULL,
         .stats = NULL,
-        .irqcapable = 1,
+        .irq_capable = 1,
         .name = "rcu_sync"
 };
 
 /*
@@ -432,33 +432,33 @@ static void rcu_bh_torture_synchronize(void)
 }
 
 static struct rcu_torture_ops rcu_bh_ops = {
         .init = NULL,
         .cleanup = NULL,
         .readlock = rcu_bh_torture_read_lock,
-        .readdelay = rcu_read_delay, /* just reuse rcu's version. */
+        .read_delay = rcu_read_delay, /* just reuse rcu's version. */
         .readunlock = rcu_bh_torture_read_unlock,
         .completed = rcu_bh_torture_completed,
-        .deferredfree = rcu_bh_torture_deferred_free,
+        .deferred_free = rcu_bh_torture_deferred_free,
         .sync = rcu_bh_torture_synchronize,
         .cb_barrier = rcu_barrier_bh,
         .stats = NULL,
-        .irqcapable = 1,
+        .irq_capable = 1,
         .name = "rcu_bh"
 };
 
 static struct rcu_torture_ops rcu_bh_sync_ops = {
         .init = rcu_sync_torture_init,
         .cleanup = NULL,
         .readlock = rcu_bh_torture_read_lock,
-        .readdelay = rcu_read_delay, /* just reuse rcu's version. */
+        .read_delay = rcu_read_delay, /* just reuse rcu's version. */
         .readunlock = rcu_bh_torture_read_unlock,
         .completed = rcu_bh_torture_completed,
-        .deferredfree = rcu_sync_torture_deferred_free,
+        .deferred_free = rcu_sync_torture_deferred_free,
         .sync = rcu_bh_torture_synchronize,
         .cb_barrier = NULL,
         .stats = NULL,
-        .irqcapable = 1,
+        .irq_capable = 1,
         .name = "rcu_bh_sync"
 };
 
 /*
@@ -530,17 +530,17 @@ static int srcu_torture_stats(char *page)
 }
 
 static struct rcu_torture_ops srcu_ops = {
         .init = srcu_torture_init,
         .cleanup = srcu_torture_cleanup,
         .readlock = srcu_torture_read_lock,
-        .readdelay = srcu_read_delay,
+        .read_delay = srcu_read_delay,
         .readunlock = srcu_torture_read_unlock,
         .completed = srcu_torture_completed,
-        .deferredfree = rcu_sync_torture_deferred_free,
+        .deferred_free = rcu_sync_torture_deferred_free,
         .sync = srcu_torture_synchronize,
         .cb_barrier = NULL,
         .stats = srcu_torture_stats,
         .name = "srcu"
 };
 
 /*
@@ -574,32 +574,49 @@ static void sched_torture_synchronize(void)
 }
 
 static struct rcu_torture_ops sched_ops = {
         .init = rcu_sync_torture_init,
         .cleanup = NULL,
         .readlock = sched_torture_read_lock,
-        .readdelay = rcu_read_delay, /* just reuse rcu's version. */
+        .read_delay = rcu_read_delay, /* just reuse rcu's version. */
         .readunlock = sched_torture_read_unlock,
         .completed = sched_torture_completed,
-        .deferredfree = rcu_sched_torture_deferred_free,
+        .deferred_free = rcu_sched_torture_deferred_free,
         .sync = sched_torture_synchronize,
         .cb_barrier = rcu_barrier_sched,
         .stats = NULL,
-        .irqcapable = 1,
+        .irq_capable = 1,
         .name = "sched"
 };
 
 static struct rcu_torture_ops sched_ops_sync = {
         .init = rcu_sync_torture_init,
         .cleanup = NULL,
         .readlock = sched_torture_read_lock,
-        .readdelay = rcu_read_delay, /* just reuse rcu's version. */
+        .read_delay = rcu_read_delay, /* just reuse rcu's version. */
         .readunlock = sched_torture_read_unlock,
         .completed = sched_torture_completed,
-        .deferredfree = rcu_sync_torture_deferred_free,
+        .deferred_free = rcu_sync_torture_deferred_free,
         .sync = sched_torture_synchronize,
         .cb_barrier = NULL,
         .stats = NULL,
         .name = "sched_sync"
+};
+
+extern int rcu_expedited_torture_stats(char *page);
+
+static struct rcu_torture_ops sched_expedited_ops = {
+        .init = rcu_sync_torture_init,
+        .cleanup = NULL,
+        .readlock = sched_torture_read_lock,
+        .read_delay = rcu_read_delay, /* just reuse rcu's version. */
+        .readunlock = sched_torture_read_unlock,
+        .completed = sched_torture_completed,
+        .deferred_free = rcu_sync_torture_deferred_free,
+        .sync = synchronize_sched_expedited,
+        .cb_barrier = NULL,
+        .stats = rcu_expedited_torture_stats,
+        .irq_capable = 1,
+        .name = "sched_expedited"
 };
 
 /*
@@ -635,7 +652,7 @@ rcu_torture_writer(void *arg)
                                 i = RCU_TORTURE_PIPE_LEN;
                         atomic_inc(&rcu_torture_wcount[i]);
                         old_rp->rtort_pipe_count++;
-                        cur_ops->deferredfree(old_rp);
+                        cur_ops->deferred_free(old_rp);
                 }
                 rcu_torture_current_version++;
                 oldbatch = cur_ops->completed();
@@ -700,7 +717,7 @@ static void rcu_torture_timer(unsigned long unused)
         if (p->rtort_mbtest == 0)
                 atomic_inc(&n_rcu_torture_mberror);
         spin_lock(&rand_lock);
-        cur_ops->readdelay(&rand);
+        cur_ops->read_delay(&rand);
         n_rcu_torture_timers++;
         spin_unlock(&rand_lock);
         preempt_disable();
@@ -738,11 +755,11 @@ rcu_torture_reader(void *arg)
 
         VERBOSE_PRINTK_STRING("rcu_torture_reader task started");
         set_user_nice(current, 19);
-        if (irqreader && cur_ops->irqcapable)
+        if (irqreader && cur_ops->irq_capable)
                 setup_timer_on_stack(&t, rcu_torture_timer, 0);
 
         do {
-                if (irqreader && cur_ops->irqcapable) {
+                if (irqreader && cur_ops->irq_capable) {
                         if (!timer_pending(&t))
                                 mod_timer(&t, 1);
                 }
@@ -757,7 +774,7 @@ rcu_torture_reader(void *arg)
                 }
                 if (p->rtort_mbtest == 0)
                         atomic_inc(&n_rcu_torture_mberror);
-                cur_ops->readdelay(&rand);
+                cur_ops->read_delay(&rand);
                 preempt_disable();
                 pipe_count = p->rtort_pipe_count;
                 if (pipe_count > RCU_TORTURE_PIPE_LEN) {
@@ -778,7 +795,7 @@ rcu_torture_reader(void *arg)
         } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
         VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
         rcutorture_shutdown_absorb("rcu_torture_reader");
-        if (irqreader && cur_ops->irqcapable)
+        if (irqreader && cur_ops->irq_capable)
                 del_timer_sync(&t);
         while (!kthread_should_stop())
                 schedule_timeout_uninterruptible(1);
@@ -1078,6 +1095,7 @@ rcu_torture_init(void)
         int firsterr = 0;
         static struct rcu_torture_ops *torture_ops[] =
                 { &rcu_ops, &rcu_sync_ops, &rcu_bh_ops, &rcu_bh_sync_ops,
+                  &sched_expedited_ops,
                   &srcu_ops, &sched_ops, &sched_ops_sync, };
 
         mutex_lock(&fullstop_mutex);
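Because sched_expedited_ops sets .deferred_free to rcu_sync_torture_deferred_free and .sync to synchronize_sched_expedited, retiring an old torture element goes through a synchronous grace-period wait, so every writer pass stresses the expedited primitive. Below is a minimal sketch of that writer-side flow; the element handling is deliberately simplified (the kernel keeps retired elements on a list rather than reusing them immediately), and the names here are illustrative stand-ins:

#include <stdio.h>

struct rcu_torture { int rtort_pipe_count; };

/* Stand-in for synchronize_sched_expedited(). */
static void sync_expedited(void) { puts("wait for expedited grace period"); }
static void (*cur_ops_sync)(void) = sync_expedited;

/* Sketch of a "sync-style" deferred free: wait for a grace period,
 * then let the old element be recycled. */
static void sync_torture_deferred_free(struct rcu_torture *p)
{
        cur_ops_sync();                 /* synchronize_sched_expedited() here */
        p->rtort_pipe_count = 0;        /* element may now be reused */
}

int main(void)
{
        struct rcu_torture rt = { .rtort_pipe_count = 1 };
        int i;

        /* Each simulated writer pass retires the old element. */
        for (i = 0; i < 3; i++)
                sync_torture_deferred_free(&rt);
        return 0;
}

Once the ops structure is listed in torture_ops[], the new flavor can be selected at module load time through rcutorture's torture_type parameter (torture_type=sched_expedited), running the expedited grace-period path under the usual reader/writer load.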