Diffstat (limited to 'kernel/rcutorture.c'):
 -rw-r--r--  kernel/rcutorture.c | 241
 1 file changed, 132 insertions(+), 109 deletions(-)
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 9b4a975a4b4a..233768f21f97 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -18,7 +18,7 @@
  * Copyright (C) IBM Corporation, 2005, 2006
  *
  * Authors: Paul E. McKenney <paulmck@us.ibm.com>
  *          Josh Triplett <josh@freedesktop.org>
  *
  * See also:  Documentation/RCU/torture.txt
  */
@@ -50,7 +50,7 @@

 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and "
               "Josh Triplett <josh@freedesktop.org>");

 static int nreaders = -1;      /* # reader threads, defaults to 2*ncpus */
 static int nfakewriters = 4;   /* # fake writer threads */
@@ -110,8 +110,8 @@ struct rcu_torture {
 };

 static LIST_HEAD(rcu_torture_freelist);
-static struct rcu_torture *rcu_torture_current = NULL;
-static long rcu_torture_current_version = 0;
+static struct rcu_torture *rcu_torture_current;
+static long rcu_torture_current_version;
 static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
 static DEFINE_SPINLOCK(rcu_torture_lock);
 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
@@ -124,11 +124,11 @@ static atomic_t n_rcu_torture_alloc_fail;
 static atomic_t n_rcu_torture_free;
 static atomic_t n_rcu_torture_mberror;
 static atomic_t n_rcu_torture_error;
-static long n_rcu_torture_timers = 0;
+static long n_rcu_torture_timers;
 static struct list_head rcu_torture_removed;
 static cpumask_var_t shuffle_tmp_mask;

-static int stutter_pause_test = 0;
+static int stutter_pause_test;

 #if defined(MODULE) || defined(CONFIG_RCU_TORTURE_TEST_RUNNABLE)
 #define RCUTORTURE_RUNNABLE_INIT 1
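The two hunks above drop redundant initializers rather than change behaviour: objects with static storage duration are zero-initialized by the C language, so "= 0" and "= NULL" on file-scope statics add nothing, and checkpatch warns about them. A standalone userspace illustration of the rule (not taken from rcutorture.c):

#include <stdio.h>

static long n_events;        /* implicitly 0, no initializer needed */
static const char *label;    /* implicitly NULL */

int main(void)
{
        /* Both objects are already zeroed before main() runs. */
        printf("n_events=%ld label=%p\n", n_events, (void *)label);
        return 0;
}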
@@ -257,17 +257,18 @@ struct rcu_torture_ops {
         void (*init)(void);
         void (*cleanup)(void);
         int (*readlock)(void);
-        void (*readdelay)(struct rcu_random_state *rrsp);
+        void (*read_delay)(struct rcu_random_state *rrsp);
         void (*readunlock)(int idx);
         int (*completed)(void);
-        void (*deferredfree)(struct rcu_torture *p);
+        void (*deferred_free)(struct rcu_torture *p);
         void (*sync)(void);
         void (*cb_barrier)(void);
         int (*stats)(char *page);
-        int irqcapable;
+        int irq_capable;
         char *name;
 };
-static struct rcu_torture_ops *cur_ops = NULL;
+
+static struct rcu_torture_ops *cur_ops;

 /*
  * Definitions for rcu torture testing.
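For reference, the per-flavor operations vector reassembled after the renames in this hunk (readdelay, deferredfree and irqcapable become read_delay, deferred_free and irq_capable, and cur_ops loses its redundant NULL initializer). The member comments are added here for orientation and are not in the file:

struct rcu_torture_ops {
        void (*init)(void);                                 /* per-flavor setup */
        void (*cleanup)(void);                              /* per-flavor teardown */
        int (*readlock)(void);                              /* enter read-side critical section */
        void (*read_delay)(struct rcu_random_state *rrsp);  /* random in-critical-section delay */
        void (*readunlock)(int idx);                        /* leave read-side critical section */
        int (*completed)(void);                             /* grace-period counter snapshot */
        void (*deferred_free)(struct rcu_torture *p);       /* call_rcu()-style deferred free */
        void (*sync)(void);                                 /* synchronous grace-period wait */
        void (*cb_barrier)(void);                           /* wait for outstanding callbacks */
        int (*stats)(char *page);                           /* flavor-specific statistics */
        int irq_capable;                                    /* readers may run from irq context */
        char *name;                                         /* string matched against torture_type */
};

static struct rcu_torture_ops *cur_ops;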
@@ -281,14 +282,17 @@ static int rcu_torture_read_lock(void) __acquires(RCU)

 static void rcu_read_delay(struct rcu_random_state *rrsp)
 {
-        long delay;
-        const long longdelay = 200;
+        const unsigned long shortdelay_us = 200;
+        const unsigned long longdelay_ms = 50;

-        /* We want there to be long-running readers, but not all the time. */
+        /* We want a short delay sometimes to make a reader delay the grace
+         * period, and we want a long delay occasionally to trigger
+         * force_quiescent_state. */

-        delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay);
-        if (!delay)
-                udelay(longdelay);
+        if (!(rcu_random(rrsp) % (nrealreaders * 2000 * longdelay_ms)))
+                mdelay(longdelay_ms);
+        if (!(rcu_random(rrsp) % (nrealreaders * 2 * shortdelay_us)))
+                udelay(shortdelay_us);
 }

 static void rcu_torture_read_unlock(int idx) __releases(RCU)
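Reassembled, the new reader-side delay has two independent branches: a rare 50 ms busy-wait long enough to hold a grace period open and provoke force_quiescent_state(), and a more frequent 200 us delay. The branch comments below are added for illustration; the rates follow directly from the constants in the hunk:

static void rcu_read_delay(struct rcu_random_state *rrsp)
{
        const unsigned long shortdelay_us = 200;
        const unsigned long longdelay_ms = 50;

        /* We want a short delay sometimes to make a reader delay the grace
         * period, and we want a long delay occasionally to trigger
         * force_quiescent_state. */

        /* roughly 1 call in nrealreaders * 100000 spins for 50 ms */
        if (!(rcu_random(rrsp) % (nrealreaders * 2000 * longdelay_ms)))
                mdelay(longdelay_ms);
        /* roughly 1 call in nrealreaders * 400 spins for 200 us */
        if (!(rcu_random(rrsp) % (nrealreaders * 2 * shortdelay_us)))
                udelay(shortdelay_us);
}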
@@ -320,7 +324,7 @@ rcu_torture_cb(struct rcu_head *p)
                 rp->rtort_mbtest = 0;
                 rcu_torture_free(rp);
         } else
-                cur_ops->deferredfree(rp);
+                cur_ops->deferred_free(rp);
 }

 static void rcu_torture_deferred_free(struct rcu_torture *p)
@@ -329,18 +333,18 @@ static void rcu_torture_deferred_free(struct rcu_torture *p)
 }

 static struct rcu_torture_ops rcu_ops = {
         .init = NULL,
         .cleanup = NULL,
         .readlock = rcu_torture_read_lock,
-        .readdelay = rcu_read_delay,
+        .read_delay = rcu_read_delay,
         .readunlock = rcu_torture_read_unlock,
         .completed = rcu_torture_completed,
-        .deferredfree = rcu_torture_deferred_free,
+        .deferred_free = rcu_torture_deferred_free,
         .sync = synchronize_rcu,
         .cb_barrier = rcu_barrier,
         .stats = NULL,
-        .irqcapable = 1,
+        .irq_capable = 1,
         .name = "rcu"
 };

 static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
@@ -370,18 +374,18 @@ static void rcu_sync_torture_init(void)
 }

 static struct rcu_torture_ops rcu_sync_ops = {
         .init = rcu_sync_torture_init,
         .cleanup = NULL,
         .readlock = rcu_torture_read_lock,
-        .readdelay = rcu_read_delay,
+        .read_delay = rcu_read_delay,
         .readunlock = rcu_torture_read_unlock,
         .completed = rcu_torture_completed,
-        .deferredfree = rcu_sync_torture_deferred_free,
+        .deferred_free = rcu_sync_torture_deferred_free,
         .sync = synchronize_rcu,
         .cb_barrier = NULL,
         .stats = NULL,
-        .irqcapable = 1,
+        .irq_capable = 1,
         .name = "rcu_sync"
 };

 /*
@@ -432,33 +436,33 @@ static void rcu_bh_torture_synchronize(void)
 }

 static struct rcu_torture_ops rcu_bh_ops = {
         .init = NULL,
         .cleanup = NULL,
         .readlock = rcu_bh_torture_read_lock,
-        .readdelay = rcu_read_delay, /* just reuse rcu's version. */
+        .read_delay = rcu_read_delay, /* just reuse rcu's version. */
         .readunlock = rcu_bh_torture_read_unlock,
         .completed = rcu_bh_torture_completed,
-        .deferredfree = rcu_bh_torture_deferred_free,
+        .deferred_free = rcu_bh_torture_deferred_free,
         .sync = rcu_bh_torture_synchronize,
         .cb_barrier = rcu_barrier_bh,
         .stats = NULL,
-        .irqcapable = 1,
+        .irq_capable = 1,
         .name = "rcu_bh"
 };

 static struct rcu_torture_ops rcu_bh_sync_ops = {
         .init = rcu_sync_torture_init,
         .cleanup = NULL,
         .readlock = rcu_bh_torture_read_lock,
-        .readdelay = rcu_read_delay, /* just reuse rcu's version. */
+        .read_delay = rcu_read_delay, /* just reuse rcu's version. */
         .readunlock = rcu_bh_torture_read_unlock,
         .completed = rcu_bh_torture_completed,
-        .deferredfree = rcu_sync_torture_deferred_free,
+        .deferred_free = rcu_sync_torture_deferred_free,
         .sync = rcu_bh_torture_synchronize,
         .cb_barrier = NULL,
         .stats = NULL,
-        .irqcapable = 1,
+        .irq_capable = 1,
         .name = "rcu_bh_sync"
 };

 /*
@@ -530,17 +534,17 @@ static int srcu_torture_stats(char *page)
 }

 static struct rcu_torture_ops srcu_ops = {
         .init = srcu_torture_init,
         .cleanup = srcu_torture_cleanup,
         .readlock = srcu_torture_read_lock,
-        .readdelay = srcu_read_delay,
+        .read_delay = srcu_read_delay,
         .readunlock = srcu_torture_read_unlock,
         .completed = srcu_torture_completed,
-        .deferredfree = rcu_sync_torture_deferred_free,
+        .deferred_free = rcu_sync_torture_deferred_free,
         .sync = srcu_torture_synchronize,
         .cb_barrier = NULL,
         .stats = srcu_torture_stats,
         .name = "srcu"
 };

 /*
@@ -574,32 +578,49 @@ static void sched_torture_synchronize(void)
 }

 static struct rcu_torture_ops sched_ops = {
         .init = rcu_sync_torture_init,
         .cleanup = NULL,
         .readlock = sched_torture_read_lock,
-        .readdelay = rcu_read_delay, /* just reuse rcu's version. */
+        .read_delay = rcu_read_delay, /* just reuse rcu's version. */
         .readunlock = sched_torture_read_unlock,
         .completed = sched_torture_completed,
-        .deferredfree = rcu_sched_torture_deferred_free,
+        .deferred_free = rcu_sched_torture_deferred_free,
         .sync = sched_torture_synchronize,
         .cb_barrier = rcu_barrier_sched,
         .stats = NULL,
-        .irqcapable = 1,
+        .irq_capable = 1,
         .name = "sched"
 };

 static struct rcu_torture_ops sched_ops_sync = {
         .init = rcu_sync_torture_init,
         .cleanup = NULL,
         .readlock = sched_torture_read_lock,
-        .readdelay = rcu_read_delay, /* just reuse rcu's version. */
+        .read_delay = rcu_read_delay, /* just reuse rcu's version. */
         .readunlock = sched_torture_read_unlock,
         .completed = sched_torture_completed,
-        .deferredfree = rcu_sync_torture_deferred_free,
+        .deferred_free = rcu_sync_torture_deferred_free,
         .sync = sched_torture_synchronize,
         .cb_barrier = NULL,
         .stats = NULL,
         .name = "sched_sync"
+};
+
+extern int rcu_expedited_torture_stats(char *page);
+
+static struct rcu_torture_ops sched_expedited_ops = {
+        .init = rcu_sync_torture_init,
+        .cleanup = NULL,
+        .readlock = sched_torture_read_lock,
+        .read_delay = rcu_read_delay, /* just reuse rcu's version. */
+        .readunlock = sched_torture_read_unlock,
+        .completed = sched_torture_completed,
+        .deferred_free = rcu_sync_torture_deferred_free,
+        .sync = synchronize_sched_expedited,
+        .cb_barrier = NULL,
+        .stats = rcu_expedited_torture_stats,
+        .irq_capable = 1,
+        .name = "sched_expedited"
 };

 /*
@@ -621,7 +642,8 @@ rcu_torture_writer(void *arg)

         do {
                 schedule_timeout_uninterruptible(1);
-                if ((rp = rcu_torture_alloc()) == NULL)
+                rp = rcu_torture_alloc();
+                if (rp == NULL)
                         continue;
                 rp->rtort_pipe_count = 0;
                 udelay(rcu_random(&rand) & 0x3ff);
@@ -635,7 +657,7 @@ rcu_torture_writer(void *arg)
                                 i = RCU_TORTURE_PIPE_LEN;
                         atomic_inc(&rcu_torture_wcount[i]);
                         old_rp->rtort_pipe_count++;
-                        cur_ops->deferredfree(old_rp);
+                        cur_ops->deferred_free(old_rp);
                 }
                 rcu_torture_current_version++;
                 oldbatch = cur_ops->completed();
@@ -700,7 +722,7 @@ static void rcu_torture_timer(unsigned long unused)
         if (p->rtort_mbtest == 0)
                 atomic_inc(&n_rcu_torture_mberror);
         spin_lock(&rand_lock);
-        cur_ops->readdelay(&rand);
+        cur_ops->read_delay(&rand);
         n_rcu_torture_timers++;
         spin_unlock(&rand_lock);
         preempt_disable();
@@ -738,11 +760,11 @@ rcu_torture_reader(void *arg)

         VERBOSE_PRINTK_STRING("rcu_torture_reader task started");
         set_user_nice(current, 19);
-        if (irqreader && cur_ops->irqcapable)
+        if (irqreader && cur_ops->irq_capable)
                 setup_timer_on_stack(&t, rcu_torture_timer, 0);

         do {
-                if (irqreader && cur_ops->irqcapable) {
+                if (irqreader && cur_ops->irq_capable) {
                         if (!timer_pending(&t))
                                 mod_timer(&t, 1);
                 }
@@ -757,7 +779,7 @@ rcu_torture_reader(void *arg)
                 }
                 if (p->rtort_mbtest == 0)
                         atomic_inc(&n_rcu_torture_mberror);
-                cur_ops->readdelay(&rand);
+                cur_ops->read_delay(&rand);
                 preempt_disable();
                 pipe_count = p->rtort_pipe_count;
                 if (pipe_count > RCU_TORTURE_PIPE_LEN) {
@@ -778,7 +800,7 @@ rcu_torture_reader(void *arg)
         } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
         VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
         rcutorture_shutdown_absorb("rcu_torture_reader");
-        if (irqreader && cur_ops->irqcapable)
+        if (irqreader && cur_ops->irq_capable)
                 del_timer_sync(&t);
         while (!kthread_should_stop())
                 schedule_timeout_uninterruptible(1);
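The writer, timer and reader hunks above are mechanical renames (deferredfree -> deferred_free, readdelay -> read_delay, irqcapable -> irq_capable). As a condensed sketch of how the reader path consumes these hooks (a paraphrase, not the verbatim rcu_torture_reader() body; the rcu_dereference() of rcu_torture_current and the per-CPU pipeline bookkeeping are elided):

        idx = cur_ops->readlock();      /* enter the flavor's read-side critical section */
        /* ... fetch rcu_torture_current, check rtort_mbtest ... */
        cur_ops->read_delay(&rand);     /* occasionally stall inside the critical section */
        /* ... account rtort_pipe_count in the rcu_torture_count per-CPU array ... */
        cur_ops->readunlock(idx);       /* leave the critical section */

When irqreader is set and the selected flavor is irq_capable, the same read-side sequence is also driven from rcu_torture_timer(), so readers get exercised from irq context as well.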
@@ -1078,6 +1100,7 @@ rcu_torture_init(void)
         int firsterr = 0;
         static struct rcu_torture_ops *torture_ops[] =
                 { &rcu_ops, &rcu_sync_ops, &rcu_bh_ops, &rcu_bh_sync_ops,
+                  &sched_expedited_ops,
                   &srcu_ops, &sched_ops, &sched_ops_sync, };

         mutex_lock(&fullstop_mutex);
@@ -1092,7 +1115,7 @@ rcu_torture_init(void)
                 printk(KERN_ALERT "rcutorture: invalid torture type: \"%s\"\n",
                        torture_type);
                 mutex_unlock(&fullstop_mutex);
-                return (-EINVAL);
+                return -EINVAL;
         }
         if (cur_ops->init)
                 cur_ops->init(); /* no "goto unwind" prior to this point!!! */
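Adding &sched_expedited_ops to torture_ops[] is what makes the new flavor selectable: rcutorture picks the ops vector whose ->name matches the torture_type module parameter (so the new flavor would be requested with torture_type=sched_expedited), and the hunk above shows the error path taken when no name matches. A plausible reconstruction of the surrounding selection loop in rcu_torture_init(), shown only for context and not part of this patch:

        /* Select the flavor named by the torture_type module parameter. */
        for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
                cur_ops = torture_ops[i];
                if (strcmp(torture_type, cur_ops->name) == 0)
                        break;
        }
        if (i == ARRAY_SIZE(torture_ops)) {
                printk(KERN_ALERT "rcutorture: invalid torture type: \"%s\"\n",
                       torture_type);
                mutex_unlock(&fullstop_mutex);
                return -EINVAL;
        }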
@@ -1143,7 +1166,7 @@ rcu_torture_init(void)
                 goto unwind;
         }
         fakewriter_tasks = kzalloc(nfakewriters * sizeof(fakewriter_tasks[0]),
                                    GFP_KERNEL);
         if (fakewriter_tasks == NULL) {
                 VERBOSE_PRINTK_ERRSTRING("out of memory");
                 firsterr = -ENOMEM;
@@ -1152,7 +1175,7 @@ rcu_torture_init(void)
         for (i = 0; i < nfakewriters; i++) {
                 VERBOSE_PRINTK_STRING("Creating rcu_torture_fakewriter task");
                 fakewriter_tasks[i] = kthread_run(rcu_torture_fakewriter, NULL,
                                                   "rcu_torture_fakewriter");
                 if (IS_ERR(fakewriter_tasks[i])) {
                         firsterr = PTR_ERR(fakewriter_tasks[i]);
                         VERBOSE_PRINTK_ERRSTRING("Failed to create fakewriter");
