Diffstat (limited to 'kernel')
-rw-r--r--  kernel/pid.c              |   1
-rw-r--r--  kernel/rcuclassic.c       |  30
-rw-r--r--  kernel/rcupdate.c         |  71
-rw-r--r--  kernel/rcupreempt.c       | 418
-rw-r--r--  kernel/rcupreempt_trace.c |   1
-rw-r--r--  kernel/rcutorture.c       | 101
-rw-r--r--  kernel/sysctl.c           |  13
7 files changed, 558 insertions, 77 deletions
diff --git a/kernel/pid.c b/kernel/pid.c
index 20d59fa2d49..30bd5d4b2ac 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -30,6 +30,7 @@ | |||
30 | #include <linux/module.h> | 30 | #include <linux/module.h> |
31 | #include <linux/slab.h> | 31 | #include <linux/slab.h> |
32 | #include <linux/init.h> | 32 | #include <linux/init.h> |
33 | #include <linux/rculist.h> | ||
33 | #include <linux/bootmem.h> | 34 | #include <linux/bootmem.h> |
34 | #include <linux/hash.h> | 35 | #include <linux/hash.h> |
35 | #include <linux/pid_namespace.h> | 36 | #include <linux/pid_namespace.h> |
diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c
index f4ffbd0f306..d8348792f9f 100644
--- a/kernel/rcuclassic.c
+++ b/kernel/rcuclassic.c
@@ -502,10 +502,38 @@ void rcu_check_callbacks(int cpu, int user) | |||
502 | if (user || | 502 | if (user || |
503 | (idle_cpu(cpu) && !in_softirq() && | 503 | (idle_cpu(cpu) && !in_softirq() && |
504 | hardirq_count() <= (1 << HARDIRQ_SHIFT))) { | 504 | hardirq_count() <= (1 << HARDIRQ_SHIFT))) { |
505 | |||
506 | /* | ||
507 | * Get here if this CPU took its interrupt from user | ||
508 | * mode or from the idle loop, and if this is not a | ||
509 | * nested interrupt. In this case, the CPU is in | ||
510 | * a quiescent state, so count it. | ||
511 | * | ||
512 | * Also do a memory barrier. This is needed to handle | ||
513 | * the case where writes from a preempt-disable section | ||
514 | * of code get reordered into schedule() by this CPU's | ||
515 | * write buffer. The memory barrier makes sure that | ||
516 | * the rcu_qsctr_inc() and rcu_bh_qsctr_inc() are seen | ||
517 | * by other CPUs to happen after any such write. | ||
518 | */ | ||
519 | |||
520 | smp_mb(); /* See above block comment. */ | ||
505 | rcu_qsctr_inc(cpu); | 521 | rcu_qsctr_inc(cpu); |
506 | rcu_bh_qsctr_inc(cpu); | 522 | rcu_bh_qsctr_inc(cpu); |
507 | } else if (!in_softirq()) | 523 | |
524 | } else if (!in_softirq()) { | ||
525 | |||
526 | /* | ||
527 | * Get here if this CPU did not take its interrupt from | ||
528 | * softirq, in other words, if it is not interrupting | ||
529 | * a rcu_bh read-side critical section. This is an _bh | ||
530 | * critical section, so count it. The memory barrier | ||
531 | * is needed for the same reason as is the above one. | ||
532 | */ | ||
533 | |||
534 | smp_mb(); /* See above block comment. */ | ||
508 | rcu_bh_qsctr_inc(cpu); | 535 | rcu_bh_qsctr_inc(cpu); |
536 | } | ||
509 | raise_rcu_softirq(); | 537 | raise_rcu_softirq(); |
510 | } | 538 | } |
511 | 539 | ||
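The new else-branch above counts an rcu_bh quiescent state whenever the interrupt was not taken from bottom-half context. A minimal sketch of why that test is sufficient (not part of the patch; demo_ptr and demo_bh_reader() are made up): rcu_read_lock_bh() disables bottom halves, which is exactly the state in_softirq() reports, so an interrupt that observes !in_softirq() cannot have landed inside an rcu_bh read-side critical section.

#include <linux/rcupdate.h>
#include <linux/interrupt.h>

static int *demo_ptr;			/* hypothetical RCU-protected pointer */

static int demo_bh_reader(void)
{
	int *p;
	int val = 0;

	rcu_read_lock_bh();		/* bottom halves off: in_softirq() is now true */
	p = rcu_dereference(demo_ptr);	/* an interrupt taken here sees in_softirq() */
	if (p)
		val = *p;
	rcu_read_unlock_bh();		/* bottom halves back on */
	return val;
}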
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index c09605f8d16..4a74b8d48d9 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -39,16 +39,16 @@ | |||
39 | #include <linux/sched.h> | 39 | #include <linux/sched.h> |
40 | #include <asm/atomic.h> | 40 | #include <asm/atomic.h> |
41 | #include <linux/bitops.h> | 41 | #include <linux/bitops.h> |
42 | #include <linux/completion.h> | ||
43 | #include <linux/percpu.h> | 42 | #include <linux/percpu.h> |
44 | #include <linux/notifier.h> | 43 | #include <linux/notifier.h> |
45 | #include <linux/cpu.h> | 44 | #include <linux/cpu.h> |
46 | #include <linux/mutex.h> | 45 | #include <linux/mutex.h> |
47 | #include <linux/module.h> | 46 | #include <linux/module.h> |
48 | 47 | ||
49 | struct rcu_synchronize { | 48 | enum rcu_barrier { |
50 | struct rcu_head head; | 49 | RCU_BARRIER_STD, |
51 | struct completion completion; | 50 | RCU_BARRIER_BH, |
51 | RCU_BARRIER_SCHED, | ||
52 | }; | 52 | }; |
53 | 53 | ||
54 | static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL}; | 54 | static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL}; |
@@ -60,7 +60,7 @@ static struct completion rcu_barrier_completion; | |||
60 | * Awaken the corresponding synchronize_rcu() instance now that a | 60 | * Awaken the corresponding synchronize_rcu() instance now that a |
61 | * grace period has elapsed. | 61 | * grace period has elapsed. |
62 | */ | 62 | */ |
63 | static void wakeme_after_rcu(struct rcu_head *head) | 63 | void wakeme_after_rcu(struct rcu_head *head) |
64 | { | 64 | { |
65 | struct rcu_synchronize *rcu; | 65 | struct rcu_synchronize *rcu; |
66 | 66 | ||
@@ -77,17 +77,7 @@ static void wakeme_after_rcu(struct rcu_head *head) | |||
77 | * sections are delimited by rcu_read_lock() and rcu_read_unlock(), | 77 | * sections are delimited by rcu_read_lock() and rcu_read_unlock(), |
78 | * and may be nested. | 78 | * and may be nested. |
79 | */ | 79 | */ |
80 | void synchronize_rcu(void) | 80 | synchronize_rcu_xxx(synchronize_rcu, call_rcu) |
81 | { | ||
82 | struct rcu_synchronize rcu; | ||
83 | |||
84 | init_completion(&rcu.completion); | ||
85 | /* Will wake me after RCU finished */ | ||
86 | call_rcu(&rcu.head, wakeme_after_rcu); | ||
87 | |||
88 | /* Wait for it */ | ||
89 | wait_for_completion(&rcu.completion); | ||
90 | } | ||
91 | EXPORT_SYMBOL_GPL(synchronize_rcu); | 81 | EXPORT_SYMBOL_GPL(synchronize_rcu); |
92 | 82 | ||
93 | static void rcu_barrier_callback(struct rcu_head *notused) | 83 | static void rcu_barrier_callback(struct rcu_head *notused) |
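The hunk above replaces the open-coded synchronize_rcu() with a synchronize_rcu_xxx() generator macro (supplied by the rcupdate.h side of this patch, not shown here) and un-statics wakeme_after_rcu() so the macro can reference it. Judging from the removed lines, the expansion presumably produces the same body with the callback-posting primitive as a parameter:

/* Sketch of what synchronize_rcu_xxx(synchronize_rcu, call_rcu) is assumed
 * to expand to, reconstructed from the open-coded body removed above. */
void synchronize_rcu(void)
{
	struct rcu_synchronize rcu;

	init_completion(&rcu.completion);
	/* Will wake me after RCU finished. */
	call_rcu(&rcu.head, wakeme_after_rcu);
	/* Wait for it. */
	wait_for_completion(&rcu.completion);
}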
@@ -99,19 +89,30 @@ static void rcu_barrier_callback(struct rcu_head *notused) | |||
99 | /* | 89 | /* |
100 | * Called with preemption disabled, and from cross-cpu IRQ context. | 90 | * Called with preemption disabled, and from cross-cpu IRQ context. |
101 | */ | 91 | */ |
102 | static void rcu_barrier_func(void *notused) | 92 | static void rcu_barrier_func(void *type) |
103 | { | 93 | { |
104 | int cpu = smp_processor_id(); | 94 | int cpu = smp_processor_id(); |
105 | struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu); | 95 | struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu); |
106 | 96 | ||
107 | atomic_inc(&rcu_barrier_cpu_count); | 97 | atomic_inc(&rcu_barrier_cpu_count); |
108 | call_rcu(head, rcu_barrier_callback); | 98 | switch ((enum rcu_barrier)type) { |
99 | case RCU_BARRIER_STD: | ||
100 | call_rcu(head, rcu_barrier_callback); | ||
101 | break; | ||
102 | case RCU_BARRIER_BH: | ||
103 | call_rcu_bh(head, rcu_barrier_callback); | ||
104 | break; | ||
105 | case RCU_BARRIER_SCHED: | ||
106 | call_rcu_sched(head, rcu_barrier_callback); | ||
107 | break; | ||
108 | } | ||
109 | } | 109 | } |
110 | 110 | ||
111 | /** | 111 | /* |
112 | * rcu_barrier - Wait until all the in-flight RCUs are complete. | 112 | * Orchestrate the specified type of RCU barrier, waiting for all |
113 | * RCU callbacks of the specified type to complete. | ||
113 | */ | 114 | */ |
114 | void rcu_barrier(void) | 115 | static void _rcu_barrier(enum rcu_barrier type) |
115 | { | 116 | { |
116 | BUG_ON(in_interrupt()); | 117 | BUG_ON(in_interrupt()); |
117 | /* Take cpucontrol mutex to protect against CPU hotplug */ | 118 | /* Take cpucontrol mutex to protect against CPU hotplug */ |
@@ -127,13 +128,39 @@ void rcu_barrier(void) | |||
127 | * until all the callbacks are queued. | 128 | * until all the callbacks are queued. |
128 | */ | 129 | */ |
129 | rcu_read_lock(); | 130 | rcu_read_lock(); |
130 | on_each_cpu(rcu_barrier_func, NULL, 0, 1); | 131 | on_each_cpu(rcu_barrier_func, (void *)type, 0, 1); |
131 | rcu_read_unlock(); | 132 | rcu_read_unlock(); |
132 | wait_for_completion(&rcu_barrier_completion); | 133 | wait_for_completion(&rcu_barrier_completion); |
133 | mutex_unlock(&rcu_barrier_mutex); | 134 | mutex_unlock(&rcu_barrier_mutex); |
134 | } | 135 | } |
136 | |||
137 | /** | ||
138 | * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete. | ||
139 | */ | ||
140 | void rcu_barrier(void) | ||
141 | { | ||
142 | _rcu_barrier(RCU_BARRIER_STD); | ||
143 | } | ||
135 | EXPORT_SYMBOL_GPL(rcu_barrier); | 144 | EXPORT_SYMBOL_GPL(rcu_barrier); |
136 | 145 | ||
146 | /** | ||
147 | * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete. | ||
148 | */ | ||
149 | void rcu_barrier_bh(void) | ||
150 | { | ||
151 | _rcu_barrier(RCU_BARRIER_BH); | ||
152 | } | ||
153 | EXPORT_SYMBOL_GPL(rcu_barrier_bh); | ||
154 | |||
155 | /** | ||
156 | * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks. | ||
157 | */ | ||
158 | void rcu_barrier_sched(void) | ||
159 | { | ||
160 | _rcu_barrier(RCU_BARRIER_SCHED); | ||
161 | } | ||
162 | EXPORT_SYMBOL_GPL(rcu_barrier_sched); | ||
163 | |||
137 | void __init rcu_init(void) | 164 | void __init rcu_init(void) |
138 | { | 165 | { |
139 | __rcu_init(); | 166 | __rcu_init(); |
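For callers, the practical effect of this file's changes is that each callback flavor now has a matching barrier. A hypothetical module-exit path (demo_exit() is made up for illustration) showing the intended use of the new rcu_barrier_sched():

#include <linux/module.h>
#include <linux/rcupdate.h>

static void __exit demo_exit(void)
{
	/* ... stop queueing new call_rcu_sched() callbacks first ... */

	/* Wait for every already-queued call_rcu_sched() callback to run,
	 * so none of the module's callback functions can fire after unload. */
	rcu_barrier_sched();
}
module_exit(demo_exit);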
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
index 5e02b774070..396b121edfe 100644
--- a/kernel/rcupreempt.c
+++ b/kernel/rcupreempt.c
@@ -46,11 +46,11 @@ | |||
46 | #include <asm/atomic.h> | 46 | #include <asm/atomic.h> |
47 | #include <linux/bitops.h> | 47 | #include <linux/bitops.h> |
48 | #include <linux/module.h> | 48 | #include <linux/module.h> |
49 | #include <linux/kthread.h> | ||
49 | #include <linux/completion.h> | 50 | #include <linux/completion.h> |
50 | #include <linux/moduleparam.h> | 51 | #include <linux/moduleparam.h> |
51 | #include <linux/percpu.h> | 52 | #include <linux/percpu.h> |
52 | #include <linux/notifier.h> | 53 | #include <linux/notifier.h> |
53 | #include <linux/rcupdate.h> | ||
54 | #include <linux/cpu.h> | 54 | #include <linux/cpu.h> |
55 | #include <linux/random.h> | 55 | #include <linux/random.h> |
56 | #include <linux/delay.h> | 56 | #include <linux/delay.h> |
@@ -82,14 +82,18 @@ struct rcu_data { | |||
82 | spinlock_t lock; /* Protect rcu_data fields. */ | 82 | spinlock_t lock; /* Protect rcu_data fields. */ |
83 | long completed; /* Number of last completed batch. */ | 83 | long completed; /* Number of last completed batch. */ |
84 | int waitlistcount; | 84 | int waitlistcount; |
85 | struct tasklet_struct rcu_tasklet; | ||
86 | struct rcu_head *nextlist; | 85 | struct rcu_head *nextlist; |
87 | struct rcu_head **nexttail; | 86 | struct rcu_head **nexttail; |
88 | struct rcu_head *waitlist[GP_STAGES]; | 87 | struct rcu_head *waitlist[GP_STAGES]; |
89 | struct rcu_head **waittail[GP_STAGES]; | 88 | struct rcu_head **waittail[GP_STAGES]; |
90 | struct rcu_head *donelist; | 89 | struct rcu_head *donelist; /* from waitlist & waitschedlist */ |
91 | struct rcu_head **donetail; | 90 | struct rcu_head **donetail; |
92 | long rcu_flipctr[2]; | 91 | long rcu_flipctr[2]; |
92 | struct rcu_head *nextschedlist; | ||
93 | struct rcu_head **nextschedtail; | ||
94 | struct rcu_head *waitschedlist; | ||
95 | struct rcu_head **waitschedtail; | ||
96 | int rcu_sched_sleeping; | ||
93 | #ifdef CONFIG_RCU_TRACE | 97 | #ifdef CONFIG_RCU_TRACE |
94 | struct rcupreempt_trace trace; | 98 | struct rcupreempt_trace trace; |
95 | #endif /* #ifdef CONFIG_RCU_TRACE */ | 99 | #endif /* #ifdef CONFIG_RCU_TRACE */ |
@@ -131,11 +135,24 @@ enum rcu_try_flip_states { | |||
131 | rcu_try_flip_waitmb_state, | 135 | rcu_try_flip_waitmb_state, |
132 | }; | 136 | }; |
133 | 137 | ||
138 | /* | ||
139 | * States for rcu_ctrlblk.rcu_sched_sleep. | ||
140 | */ | ||
141 | |||
142 | enum rcu_sched_sleep_states { | ||
143 | rcu_sched_not_sleeping, /* Not sleeping, callbacks need GP. */ | ||
144 | rcu_sched_sleep_prep, /* Thinking of sleeping, rechecking. */ | ||
145 | rcu_sched_sleeping, /* Sleeping, awaken if GP needed. */ | ||
146 | }; | ||
147 | |||
134 | struct rcu_ctrlblk { | 148 | struct rcu_ctrlblk { |
135 | spinlock_t fliplock; /* Protect state-machine transitions. */ | 149 | spinlock_t fliplock; /* Protect state-machine transitions. */ |
136 | long completed; /* Number of last completed batch. */ | 150 | long completed; /* Number of last completed batch. */ |
137 | enum rcu_try_flip_states rcu_try_flip_state; /* The current state of | 151 | enum rcu_try_flip_states rcu_try_flip_state; /* The current state of |
138 | the rcu state machine */ | 152 | the rcu state machine */ |
153 | spinlock_t schedlock; /* Protect rcu_sched sleep state. */ | ||
154 | enum rcu_sched_sleep_states sched_sleep; /* rcu_sched state. */ | ||
155 | wait_queue_head_t sched_wq; /* Place for rcu_sched to sleep. */ | ||
139 | }; | 156 | }; |
140 | 157 | ||
141 | static DEFINE_PER_CPU(struct rcu_data, rcu_data); | 158 | static DEFINE_PER_CPU(struct rcu_data, rcu_data); |
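The three sched_sleep states form a handshake between call_rcu_sched() and the grace-period kthread, both added later in this patch. A condensed, approximate sketch of the transitions (illustrative only, reconstructed from the code below):

/*
 * rcu_sched_not_sleeping
 *    --(kthread finds no new rcu_sched callbacks for a full pass)-->
 * rcu_sched_sleep_prep
 *    --(still nothing after advancing callbacks on every CPU)------>
 * rcu_sched_sleeping		(kthread blocks on rcu_ctrlblk.sched_wq)
 *    --(call_rcu_sched() queues a callback, resets the state and
 *      calls wake_up_interruptible(&rcu_ctrlblk.sched_wq))--------->
 * rcu_sched_not_sleeping
 */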
@@ -143,8 +160,12 @@ static struct rcu_ctrlblk rcu_ctrlblk = { | |||
143 | .fliplock = __SPIN_LOCK_UNLOCKED(rcu_ctrlblk.fliplock), | 160 | .fliplock = __SPIN_LOCK_UNLOCKED(rcu_ctrlblk.fliplock), |
144 | .completed = 0, | 161 | .completed = 0, |
145 | .rcu_try_flip_state = rcu_try_flip_idle_state, | 162 | .rcu_try_flip_state = rcu_try_flip_idle_state, |
163 | .schedlock = __SPIN_LOCK_UNLOCKED(rcu_ctrlblk.schedlock), | ||
164 | .sched_sleep = rcu_sched_not_sleeping, | ||
165 | .sched_wq = __WAIT_QUEUE_HEAD_INITIALIZER(rcu_ctrlblk.sched_wq), | ||
146 | }; | 166 | }; |
147 | 167 | ||
168 | static struct task_struct *rcu_sched_grace_period_task; | ||
148 | 169 | ||
149 | #ifdef CONFIG_RCU_TRACE | 170 | #ifdef CONFIG_RCU_TRACE |
150 | static char *rcu_try_flip_state_names[] = | 171 | static char *rcu_try_flip_state_names[] = |
@@ -207,6 +228,8 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(enum rcu_mb_flag_values, rcu_mb_flag) | |||
207 | */ | 228 | */ |
208 | #define RCU_TRACE_RDP(f, rdp) RCU_TRACE(f, &((rdp)->trace)); | 229 | #define RCU_TRACE_RDP(f, rdp) RCU_TRACE(f, &((rdp)->trace)); |
209 | 230 | ||
231 | #define RCU_SCHED_BATCH_TIME (HZ / 50) | ||
232 | |||
210 | /* | 233 | /* |
211 | * Return the number of RCU batches processed thus far. Useful | 234 | * Return the number of RCU batches processed thus far. Useful |
212 | * for debug and statistics. | 235 | * for debug and statistics. |
@@ -411,32 +434,34 @@ static void __rcu_advance_callbacks(struct rcu_data *rdp) | |||
411 | } | 434 | } |
412 | } | 435 | } |
413 | 436 | ||
414 | #ifdef CONFIG_NO_HZ | 437 | DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_dyntick_sched, rcu_dyntick_sched) = { |
438 | .dynticks = 1, | ||
439 | }; | ||
415 | 440 | ||
416 | DEFINE_PER_CPU(long, dynticks_progress_counter) = 1; | 441 | #ifdef CONFIG_NO_HZ |
417 | static DEFINE_PER_CPU(long, rcu_dyntick_snapshot); | ||
418 | static DEFINE_PER_CPU(int, rcu_update_flag); | 442 | static DEFINE_PER_CPU(int, rcu_update_flag); |
419 | 443 | ||
420 | /** | 444 | /** |
421 | * rcu_irq_enter - Called from Hard irq handlers and NMI/SMI. | 445 | * rcu_irq_enter - Called from Hard irq handlers and NMI/SMI. |
422 | * | 446 | * |
423 | * If the CPU was idle with dynamic ticks active, this updates the | 447 | * If the CPU was idle with dynamic ticks active, this updates the |
424 | * dynticks_progress_counter to let the RCU handling know that the | 448 | * rcu_dyntick_sched.dynticks to let the RCU handling know that the |
425 | * CPU is active. | 449 | * CPU is active. |
426 | */ | 450 | */ |
427 | void rcu_irq_enter(void) | 451 | void rcu_irq_enter(void) |
428 | { | 452 | { |
429 | int cpu = smp_processor_id(); | 453 | int cpu = smp_processor_id(); |
454 | struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu); | ||
430 | 455 | ||
431 | if (per_cpu(rcu_update_flag, cpu)) | 456 | if (per_cpu(rcu_update_flag, cpu)) |
432 | per_cpu(rcu_update_flag, cpu)++; | 457 | per_cpu(rcu_update_flag, cpu)++; |
433 | 458 | ||
434 | /* | 459 | /* |
435 | * Only update if we are coming from a stopped ticks mode | 460 | * Only update if we are coming from a stopped ticks mode |
436 | * (dynticks_progress_counter is even). | 461 | * (rcu_dyntick_sched.dynticks is even). |
437 | */ | 462 | */ |
438 | if (!in_interrupt() && | 463 | if (!in_interrupt() && |
439 | (per_cpu(dynticks_progress_counter, cpu) & 0x1) == 0) { | 464 | (rdssp->dynticks & 0x1) == 0) { |
440 | /* | 465 | /* |
441 | * The following might seem like we could have a race | 466 | * The following might seem like we could have a race |
442 | * with NMI/SMIs. But this really isn't a problem. | 467 | * with NMI/SMIs. But this really isn't a problem. |
@@ -459,12 +484,12 @@ void rcu_irq_enter(void) | |||
459 | * RCU read-side critical sections on this CPU would | 484 | * RCU read-side critical sections on this CPU would |
460 | * have already completed. | 485 | * have already completed. |
461 | */ | 486 | */ |
462 | per_cpu(dynticks_progress_counter, cpu)++; | 487 | rdssp->dynticks++; |
463 | /* | 488 | /* |
464 | * The following memory barrier ensures that any | 489 | * The following memory barrier ensures that any |
465 | * rcu_read_lock() primitives in the irq handler | 490 | * rcu_read_lock() primitives in the irq handler |
466 | * are seen by other CPUs to follow the above | 491 | * are seen by other CPUs to follow the above |
467 | * increment to dynticks_progress_counter. This is | 492 | * increment to rcu_dyntick_sched.dynticks. This is |
468 | * required in order for other CPUs to correctly | 493 | * required in order for other CPUs to correctly |
469 | * determine when it is safe to advance the RCU | 494 | * determine when it is safe to advance the RCU |
470 | * grace-period state machine. | 495 | * grace-period state machine. |
@@ -472,7 +497,7 @@ void rcu_irq_enter(void) | |||
472 | smp_mb(); /* see above block comment. */ | 497 | smp_mb(); /* see above block comment. */ |
473 | /* | 498 | /* |
474 | * Since we can't determine the dynamic tick mode from | 499 | * Since we can't determine the dynamic tick mode from |
475 | * the dynticks_progress_counter after this routine, | 500 | * the rcu_dyntick_sched.dynticks after this routine, |
476 | * we use a second flag to acknowledge that we came | 501 | * we use a second flag to acknowledge that we came |
477 | * from an idle state with ticks stopped. | 502 | * from an idle state with ticks stopped. |
478 | */ | 503 | */ |
@@ -480,7 +505,7 @@ void rcu_irq_enter(void) | |||
480 | /* | 505 | /* |
481 | * If we take an NMI/SMI now, they will also increment | 506 | * If we take an NMI/SMI now, they will also increment |
482 | * the rcu_update_flag, and will not update the | 507 | * the rcu_update_flag, and will not update the |
483 | * dynticks_progress_counter on exit. That is for | 508 | * rcu_dyntick_sched.dynticks on exit. That is for |
484 | * this IRQ to do. | 509 | * this IRQ to do. |
485 | */ | 510 | */ |
486 | } | 511 | } |
@@ -490,12 +515,13 @@ void rcu_irq_enter(void) | |||
490 | * rcu_irq_exit - Called from exiting Hard irq context. | 515 | * rcu_irq_exit - Called from exiting Hard irq context. |
491 | * | 516 | * |
492 | * If the CPU was idle with dynamic ticks active, update the | 517 | * If the CPU was idle with dynamic ticks active, update the |
493 | * dynticks_progress_counter to let the RCU handling be | 518 | * rcu_dyntick_sched.dynticks to let the RCU handling be |
494 | * aware that the CPU is going back to idle with no ticks. | 519 | * aware that the CPU is going back to idle with no ticks. |
495 | */ | 520 | */ |
496 | void rcu_irq_exit(void) | 521 | void rcu_irq_exit(void) |
497 | { | 522 | { |
498 | int cpu = smp_processor_id(); | 523 | int cpu = smp_processor_id(); |
524 | struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu); | ||
499 | 525 | ||
500 | /* | 526 | /* |
501 | * rcu_update_flag is set if we interrupted the CPU | 527 | * rcu_update_flag is set if we interrupted the CPU |
@@ -503,7 +529,7 @@ void rcu_irq_exit(void) | |||
503 | * Once this occurs, we keep track of interrupt nesting | 529 | * Once this occurs, we keep track of interrupt nesting |
504 | * because a NMI/SMI could also come in, and we still | 530 | * because a NMI/SMI could also come in, and we still |
505 | * only want the IRQ that started the increment of the | 531 | * only want the IRQ that started the increment of the |
506 | * dynticks_progress_counter to be the one that modifies | 532 | * rcu_dyntick_sched.dynticks to be the one that modifies |
507 | * it on exit. | 533 | * it on exit. |
508 | */ | 534 | */ |
509 | if (per_cpu(rcu_update_flag, cpu)) { | 535 | if (per_cpu(rcu_update_flag, cpu)) { |
@@ -515,28 +541,29 @@ void rcu_irq_exit(void) | |||
515 | 541 | ||
516 | /* | 542 | /* |
517 | * If an NMI/SMI happens now we are still | 543 | * If an NMI/SMI happens now we are still |
518 | * protected by the dynticks_progress_counter being odd. | 544 | * protected by the rcu_dyntick_sched.dynticks being odd. |
519 | */ | 545 | */ |
520 | 546 | ||
521 | /* | 547 | /* |
522 | * The following memory barrier ensures that any | 548 | * The following memory barrier ensures that any |
523 | * rcu_read_unlock() primitives in the irq handler | 549 | * rcu_read_unlock() primitives in the irq handler |
524 | * are seen by other CPUs to precede the following | 550 | * are seen by other CPUs to precede the following |
525 | * increment to dynticks_progress_counter. This | 551 | * increment to rcu_dyntick_sched.dynticks. This |
526 | * is required in order for other CPUs to determine | 552 | * is required in order for other CPUs to determine |
527 | * when it is safe to advance the RCU grace-period | 553 | * when it is safe to advance the RCU grace-period |
528 | * state machine. | 554 | * state machine. |
529 | */ | 555 | */ |
530 | smp_mb(); /* see above block comment. */ | 556 | smp_mb(); /* see above block comment. */ |
531 | per_cpu(dynticks_progress_counter, cpu)++; | 557 | rdssp->dynticks++; |
532 | WARN_ON(per_cpu(dynticks_progress_counter, cpu) & 0x1); | 558 | WARN_ON(rdssp->dynticks & 0x1); |
533 | } | 559 | } |
534 | } | 560 | } |
535 | 561 | ||
536 | static void dyntick_save_progress_counter(int cpu) | 562 | static void dyntick_save_progress_counter(int cpu) |
537 | { | 563 | { |
538 | per_cpu(rcu_dyntick_snapshot, cpu) = | 564 | struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu); |
539 | per_cpu(dynticks_progress_counter, cpu); | 565 | |
566 | rdssp->dynticks_snap = rdssp->dynticks; | ||
540 | } | 567 | } |
541 | 568 | ||
542 | static inline int | 569 | static inline int |
@@ -544,9 +571,10 @@ rcu_try_flip_waitack_needed(int cpu) | |||
544 | { | 571 | { |
545 | long curr; | 572 | long curr; |
546 | long snap; | 573 | long snap; |
574 | struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu); | ||
547 | 575 | ||
548 | curr = per_cpu(dynticks_progress_counter, cpu); | 576 | curr = rdssp->dynticks; |
549 | snap = per_cpu(rcu_dyntick_snapshot, cpu); | 577 | snap = rdssp->dynticks_snap; |
550 | smp_mb(); /* force ordering with cpu entering/leaving dynticks. */ | 578 | smp_mb(); /* force ordering with cpu entering/leaving dynticks. */ |
551 | 579 | ||
552 | /* | 580 | /* |
@@ -567,7 +595,7 @@ rcu_try_flip_waitack_needed(int cpu) | |||
567 | * that this CPU already acknowledged the counter. | 595 | * that this CPU already acknowledged the counter. |
568 | */ | 596 | */ |
569 | 597 | ||
570 | if ((curr - snap) > 2 || (snap & 0x1) == 0) | 598 | if ((curr - snap) > 2 || (curr & 0x1) == 0) |
571 | return 0; | 599 | return 0; |
572 | 600 | ||
573 | /* We need this CPU to explicitly acknowledge the counter flip. */ | 601 | /* We need this CPU to explicitly acknowledge the counter flip. */ |
@@ -580,9 +608,10 @@ rcu_try_flip_waitmb_needed(int cpu) | |||
580 | { | 608 | { |
581 | long curr; | 609 | long curr; |
582 | long snap; | 610 | long snap; |
611 | struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu); | ||
583 | 612 | ||
584 | curr = per_cpu(dynticks_progress_counter, cpu); | 613 | curr = rdssp->dynticks; |
585 | snap = per_cpu(rcu_dyntick_snapshot, cpu); | 614 | snap = rdssp->dynticks_snap; |
586 | smp_mb(); /* force ordering with cpu entering/leaving dynticks. */ | 615 | smp_mb(); /* force ordering with cpu entering/leaving dynticks. */ |
587 | 616 | ||
588 | /* | 617 | /* |
@@ -609,14 +638,86 @@ rcu_try_flip_waitmb_needed(int cpu) | |||
609 | return 1; | 638 | return 1; |
610 | } | 639 | } |
611 | 640 | ||
641 | static void dyntick_save_progress_counter_sched(int cpu) | ||
642 | { | ||
643 | struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu); | ||
644 | |||
645 | rdssp->sched_dynticks_snap = rdssp->dynticks; | ||
646 | } | ||
647 | |||
648 | static int rcu_qsctr_inc_needed_dyntick(int cpu) | ||
649 | { | ||
650 | long curr; | ||
651 | long snap; | ||
652 | struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu); | ||
653 | |||
654 | curr = rdssp->dynticks; | ||
655 | snap = rdssp->sched_dynticks_snap; | ||
656 | smp_mb(); /* force ordering with cpu entering/leaving dynticks. */ | ||
657 | |||
658 | /* | ||
659 | * If the CPU remained in dynticks mode for the entire time | ||
660 | * and didn't take any interrupts, NMIs, SMIs, or whatever, | ||
661 | * then it cannot be in the middle of an rcu_read_lock(), so | ||
662 | * the next rcu_read_lock() it executes must use the new value | ||
663 | * of the counter. Therefore, this CPU has been in a quiescent | ||
664 | * state the entire time, and we don't need to wait for it. | ||
665 | */ | ||
666 | |||
667 | if ((curr == snap) && ((curr & 0x1) == 0)) | ||
668 | return 0; | ||
669 | |||
670 | /* | ||
671 | * If the CPU passed through or entered a dynticks idle phase with | ||
672 | * no active irq handlers, then, as above, this CPU has already | ||
673 | * passed through a quiescent state. | ||
674 | */ | ||
675 | |||
676 | if ((curr - snap) > 2 || (snap & 0x1) == 0) | ||
677 | return 0; | ||
678 | |||
679 | /* We need this CPU to go through a quiescent state. */ | ||
680 | |||
681 | return 1; | ||
682 | } | ||
683 | |||
612 | #else /* !CONFIG_NO_HZ */ | 684 | #else /* !CONFIG_NO_HZ */ |
613 | 685 | ||
614 | # define dyntick_save_progress_counter(cpu) do { } while (0) | 686 | # define dyntick_save_progress_counter(cpu) do { } while (0) |
615 | # define rcu_try_flip_waitack_needed(cpu) (1) | 687 | # define rcu_try_flip_waitack_needed(cpu) (1) |
616 | # define rcu_try_flip_waitmb_needed(cpu) (1) | 688 | # define rcu_try_flip_waitmb_needed(cpu) (1) |
689 | |||
690 | # define dyntick_save_progress_counter_sched(cpu) do { } while (0) | ||
691 | # define rcu_qsctr_inc_needed_dyntick(cpu) (1) | ||
617 | 692 | ||
618 | #endif /* CONFIG_NO_HZ */ | 693 | #endif /* CONFIG_NO_HZ */ |
619 | 694 | ||
695 | static void save_qsctr_sched(int cpu) | ||
696 | { | ||
697 | struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu); | ||
698 | |||
699 | rdssp->sched_qs_snap = rdssp->sched_qs; | ||
700 | } | ||
701 | |||
702 | static inline int rcu_qsctr_inc_needed(int cpu) | ||
703 | { | ||
704 | struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu); | ||
705 | |||
706 | /* | ||
707 | * If there has been a quiescent state, no more need to wait | ||
708 | * on this CPU. | ||
709 | */ | ||
710 | |||
711 | if (rdssp->sched_qs != rdssp->sched_qs_snap) { | ||
712 | smp_mb(); /* force ordering with cpu entering schedule(). */ | ||
713 | return 0; | ||
714 | } | ||
715 | |||
716 | /* We need this CPU to go through a quiescent state. */ | ||
717 | |||
718 | return 1; | ||
719 | } | ||
720 | |||
620 | /* | 721 | /* |
621 | * Get here when RCU is idle. Decide whether we need to | 722 | * Get here when RCU is idle. Decide whether we need to |
622 | * move out of idle state, and return non-zero if so. | 723 | * move out of idle state, and return non-zero if so. |
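Since the dynticks counter is kept odd while the CPU is running and even while it is dynticks-idle, the two early returns in rcu_qsctr_inc_needed_dyntick() above can be restated as the following equivalent helper (illustrative only; logic inverted so it answers "was this CPU already quiescent?"):

/* Illustrative restatement of rcu_qsctr_inc_needed_dyntick(): non-zero
 * means the snapshot proves the CPU was quiescent, so no waiting needed. */
static int demo_cpu_was_quiescent(long curr, long snap)
{
	if (curr == snap && (curr & 0x1) == 0)
		return 1;	/* stayed in dynticks-idle the whole time */
	if (curr - snap > 2 || (snap & 0x1) == 0)
		return 1;	/* passed through or entered dynticks-idle */
	return 0;		/* must wait for an explicit quiescent state */
}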
@@ -819,6 +920,26 @@ void rcu_check_callbacks(int cpu, int user) | |||
819 | unsigned long flags; | 920 | unsigned long flags; |
820 | struct rcu_data *rdp = RCU_DATA_CPU(cpu); | 921 | struct rcu_data *rdp = RCU_DATA_CPU(cpu); |
821 | 922 | ||
923 | /* | ||
924 | * If this CPU took its interrupt from user mode or from the | ||
925 | * idle loop, and this is not a nested interrupt, then | ||
926 | * this CPU has to have exited all prior preempt-disable | ||
927 | * sections of code. So increment the counter to note this. | ||
928 | * | ||
929 | * The memory barrier is needed to handle the case where | ||
930 | * writes from a preempt-disable section of code get reordered | ||
931 | * into schedule() by this CPU's write buffer. So the memory | ||
932 | * barrier makes sure that the rcu_qsctr_inc() is seen by other | ||
933 | * CPUs to happen after any such write. | ||
934 | */ | ||
935 | |||
936 | if (user || | ||
937 | (idle_cpu(cpu) && !in_softirq() && | ||
938 | hardirq_count() <= (1 << HARDIRQ_SHIFT))) { | ||
939 | smp_mb(); /* Guard against aggressive schedule(). */ | ||
940 | rcu_qsctr_inc(cpu); | ||
941 | } | ||
942 | |||
822 | rcu_check_mb(cpu); | 943 | rcu_check_mb(cpu); |
823 | if (rcu_ctrlblk.completed == rdp->completed) | 944 | if (rcu_ctrlblk.completed == rdp->completed) |
824 | rcu_try_flip(); | 945 | rcu_try_flip(); |
@@ -869,6 +990,8 @@ void rcu_offline_cpu(int cpu) | |||
869 | struct rcu_head *list = NULL; | 990 | struct rcu_head *list = NULL; |
870 | unsigned long flags; | 991 | unsigned long flags; |
871 | struct rcu_data *rdp = RCU_DATA_CPU(cpu); | 992 | struct rcu_data *rdp = RCU_DATA_CPU(cpu); |
993 | struct rcu_head *schedlist = NULL; | ||
994 | struct rcu_head **schedtail = &schedlist; | ||
872 | struct rcu_head **tail = &list; | 995 | struct rcu_head **tail = &list; |
873 | 996 | ||
874 | /* | 997 | /* |
@@ -882,6 +1005,11 @@ void rcu_offline_cpu(int cpu) | |||
882 | rcu_offline_cpu_enqueue(rdp->waitlist[i], rdp->waittail[i], | 1005 | rcu_offline_cpu_enqueue(rdp->waitlist[i], rdp->waittail[i], |
883 | list, tail); | 1006 | list, tail); |
884 | rcu_offline_cpu_enqueue(rdp->nextlist, rdp->nexttail, list, tail); | 1007 | rcu_offline_cpu_enqueue(rdp->nextlist, rdp->nexttail, list, tail); |
1008 | rcu_offline_cpu_enqueue(rdp->waitschedlist, rdp->waitschedtail, | ||
1009 | schedlist, schedtail); | ||
1010 | rcu_offline_cpu_enqueue(rdp->nextschedlist, rdp->nextschedtail, | ||
1011 | schedlist, schedtail); | ||
1012 | rdp->rcu_sched_sleeping = 0; | ||
885 | spin_unlock_irqrestore(&rdp->lock, flags); | 1013 | spin_unlock_irqrestore(&rdp->lock, flags); |
886 | rdp->waitlistcount = 0; | 1014 | rdp->waitlistcount = 0; |
887 | 1015 | ||
@@ -916,22 +1044,40 @@ void rcu_offline_cpu(int cpu) | |||
916 | * fix. | 1044 | * fix. |
917 | */ | 1045 | */ |
918 | 1046 | ||
919 | local_irq_save(flags); | 1047 | local_irq_save(flags); /* disable preempt till we know what lock. */ |
920 | rdp = RCU_DATA_ME(); | 1048 | rdp = RCU_DATA_ME(); |
921 | spin_lock(&rdp->lock); | 1049 | spin_lock(&rdp->lock); |
922 | *rdp->nexttail = list; | 1050 | *rdp->nexttail = list; |
923 | if (list) | 1051 | if (list) |
924 | rdp->nexttail = tail; | 1052 | rdp->nexttail = tail; |
1053 | *rdp->nextschedtail = schedlist; | ||
1054 | if (schedlist) | ||
1055 | rdp->nextschedtail = schedtail; | ||
925 | spin_unlock_irqrestore(&rdp->lock, flags); | 1056 | spin_unlock_irqrestore(&rdp->lock, flags); |
926 | } | 1057 | } |
927 | 1058 | ||
928 | void __devinit rcu_online_cpu(int cpu) | 1059 | void __devinit rcu_online_cpu(int cpu) |
929 | { | 1060 | { |
930 | unsigned long flags; | 1061 | unsigned long flags; |
1062 | struct rcu_data *rdp; | ||
931 | 1063 | ||
932 | spin_lock_irqsave(&rcu_ctrlblk.fliplock, flags); | 1064 | spin_lock_irqsave(&rcu_ctrlblk.fliplock, flags); |
933 | cpu_set(cpu, rcu_cpu_online_map); | 1065 | cpu_set(cpu, rcu_cpu_online_map); |
934 | spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags); | 1066 | spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags); |
1067 | |||
1068 | /* | ||
1069 | * The rcu_sched grace-period processing might have bypassed | ||
1070 | * this CPU, given that it was not in the rcu_cpu_online_map | ||
1071 | * when the grace-period scan started. This means that the | ||
1072 | * grace-period task might sleep. So make sure that if this | ||
1073 | * should happen, the first callback posted to this CPU will | ||
1074 | * wake up the grace-period task if need be. | ||
1075 | */ | ||
1076 | |||
1077 | rdp = RCU_DATA_CPU(cpu); | ||
1078 | spin_lock_irqsave(&rdp->lock, flags); | ||
1079 | rdp->rcu_sched_sleeping = 1; | ||
1080 | spin_unlock_irqrestore(&rdp->lock, flags); | ||
935 | } | 1081 | } |
936 | 1082 | ||
937 | #else /* #ifdef CONFIG_HOTPLUG_CPU */ | 1083 | #else /* #ifdef CONFIG_HOTPLUG_CPU */ |
@@ -986,31 +1132,196 @@ void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) | |||
986 | *rdp->nexttail = head; | 1132 | *rdp->nexttail = head; |
987 | rdp->nexttail = &head->next; | 1133 | rdp->nexttail = &head->next; |
988 | RCU_TRACE_RDP(rcupreempt_trace_next_add, rdp); | 1134 | RCU_TRACE_RDP(rcupreempt_trace_next_add, rdp); |
989 | spin_unlock(&rdp->lock); | 1135 | spin_unlock_irqrestore(&rdp->lock, flags); |
990 | local_irq_restore(flags); | ||
991 | } | 1136 | } |
992 | EXPORT_SYMBOL_GPL(call_rcu); | 1137 | EXPORT_SYMBOL_GPL(call_rcu); |
993 | 1138 | ||
1139 | void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) | ||
1140 | { | ||
1141 | unsigned long flags; | ||
1142 | struct rcu_data *rdp; | ||
1143 | int wake_gp = 0; | ||
1144 | |||
1145 | head->func = func; | ||
1146 | head->next = NULL; | ||
1147 | local_irq_save(flags); | ||
1148 | rdp = RCU_DATA_ME(); | ||
1149 | spin_lock(&rdp->lock); | ||
1150 | *rdp->nextschedtail = head; | ||
1151 | rdp->nextschedtail = &head->next; | ||
1152 | if (rdp->rcu_sched_sleeping) { | ||
1153 | |||
1154 | /* Grace-period processing might be sleeping... */ | ||
1155 | |||
1156 | rdp->rcu_sched_sleeping = 0; | ||
1157 | wake_gp = 1; | ||
1158 | } | ||
1159 | spin_unlock_irqrestore(&rdp->lock, flags); | ||
1160 | if (wake_gp) { | ||
1161 | |||
1162 | /* Wake up grace-period processing, unless someone beat us. */ | ||
1163 | |||
1164 | spin_lock_irqsave(&rcu_ctrlblk.schedlock, flags); | ||
1165 | if (rcu_ctrlblk.sched_sleep != rcu_sched_sleeping) | ||
1166 | wake_gp = 0; | ||
1167 | rcu_ctrlblk.sched_sleep = rcu_sched_not_sleeping; | ||
1168 | spin_unlock_irqrestore(&rcu_ctrlblk.schedlock, flags); | ||
1169 | if (wake_gp) | ||
1170 | wake_up_interruptible(&rcu_ctrlblk.sched_wq); | ||
1171 | } | ||
1172 | } | ||
1173 | EXPORT_SYMBOL_GPL(call_rcu_sched); | ||
1174 | |||
994 | /* | 1175 | /* |
995 | * Wait until all currently running preempt_disable() code segments | 1176 | * Wait until all currently running preempt_disable() code segments |
996 | * (including hardware-irq-disable segments) complete. Note that | 1177 | * (including hardware-irq-disable segments) complete. Note that |
997 | * in -rt this does -not- necessarily result in all currently executing | 1178 | * in -rt this does -not- necessarily result in all currently executing |
998 | * interrupt -handlers- having completed. | 1179 | * interrupt -handlers- having completed. |
999 | */ | 1180 | */ |
1000 | void __synchronize_sched(void) | 1181 | synchronize_rcu_xxx(__synchronize_sched, call_rcu_sched) |
1182 | EXPORT_SYMBOL_GPL(__synchronize_sched); | ||
1183 | |||
1184 | /* | ||
1185 | * kthread function that manages call_rcu_sched grace periods. | ||
1186 | */ | ||
1187 | static int rcu_sched_grace_period(void *arg) | ||
1001 | { | 1188 | { |
1002 | cpumask_t oldmask; | 1189 | int couldsleep; /* might sleep after current pass. */ |
1190 | int couldsleepnext = 0; /* might sleep after next pass. */ | ||
1003 | int cpu; | 1191 | int cpu; |
1192 | unsigned long flags; | ||
1193 | struct rcu_data *rdp; | ||
1194 | int ret; | ||
1004 | 1195 | ||
1005 | if (sched_getaffinity(0, &oldmask) < 0) | 1196 | /* |
1006 | oldmask = cpu_possible_map; | 1197 | * Each pass through the following loop handles one |
1007 | for_each_online_cpu(cpu) { | 1198 | * rcu_sched grace period cycle. |
1008 | sched_setaffinity(0, &cpumask_of_cpu(cpu)); | 1199 | */ |
1009 | schedule(); | 1200 | do { |
1010 | } | 1201 | /* Save each CPU's current state. */ |
1011 | sched_setaffinity(0, &oldmask); | 1202 | |
1203 | for_each_online_cpu(cpu) { | ||
1204 | dyntick_save_progress_counter_sched(cpu); | ||
1205 | save_qsctr_sched(cpu); | ||
1206 | } | ||
1207 | |||
1208 | /* | ||
1209 | * Sleep for about an RCU grace-period's worth to | ||
1210 | * allow better batching and to consume less CPU. | ||
1211 | */ | ||
1212 | schedule_timeout_interruptible(RCU_SCHED_BATCH_TIME); | ||
1213 | |||
1214 | /* | ||
1215 | * If there was nothing to do last time, prepare to | ||
1216 | * sleep at the end of the current grace period cycle. | ||
1217 | */ | ||
1218 | couldsleep = couldsleepnext; | ||
1219 | couldsleepnext = 1; | ||
1220 | if (couldsleep) { | ||
1221 | spin_lock_irqsave(&rcu_ctrlblk.schedlock, flags); | ||
1222 | rcu_ctrlblk.sched_sleep = rcu_sched_sleep_prep; | ||
1223 | spin_unlock_irqrestore(&rcu_ctrlblk.schedlock, flags); | ||
1224 | } | ||
1225 | |||
1226 | /* | ||
1227 | * Wait on each CPU in turn to have either visited | ||
1228 | * a quiescent state or been in dynticks-idle mode. | ||
1229 | */ | ||
1230 | for_each_online_cpu(cpu) { | ||
1231 | while (rcu_qsctr_inc_needed(cpu) && | ||
1232 | rcu_qsctr_inc_needed_dyntick(cpu)) { | ||
1233 | /* resched_cpu(cpu); @@@ */ | ||
1234 | schedule_timeout_interruptible(1); | ||
1235 | } | ||
1236 | } | ||
1237 | |||
1238 | /* Advance callbacks for each CPU. */ | ||
1239 | |||
1240 | for_each_online_cpu(cpu) { | ||
1241 | |||
1242 | rdp = RCU_DATA_CPU(cpu); | ||
1243 | spin_lock_irqsave(&rdp->lock, flags); | ||
1244 | |||
1245 | /* | ||
1246 | * We are running on this CPU irq-disabled, so no | ||
1247 | * CPU can go offline until we re-enable irqs. | ||
1248 | * The current CPU might have already gone | ||
1249 | * offline (between the for_each_online_cpu and | ||
1250 | * the spin_lock_irqsave), but in that case all its | ||
1251 | * callback lists will be empty, so no harm done. | ||
1252 | * | ||
1253 | * Advance the callbacks! We share normal RCU's | ||
1254 | * donelist, since callbacks are invoked the | ||
1255 | * same way in either case. | ||
1256 | */ | ||
1257 | if (rdp->waitschedlist != NULL) { | ||
1258 | *rdp->donetail = rdp->waitschedlist; | ||
1259 | rdp->donetail = rdp->waitschedtail; | ||
1260 | |||
1261 | /* | ||
1262 | * Next rcu_check_callbacks() will | ||
1263 | * do the required raise_softirq(). | ||
1264 | */ | ||
1265 | } | ||
1266 | if (rdp->nextschedlist != NULL) { | ||
1267 | rdp->waitschedlist = rdp->nextschedlist; | ||
1268 | rdp->waitschedtail = rdp->nextschedtail; | ||
1269 | couldsleep = 0; | ||
1270 | couldsleepnext = 0; | ||
1271 | } else { | ||
1272 | rdp->waitschedlist = NULL; | ||
1273 | rdp->waitschedtail = &rdp->waitschedlist; | ||
1274 | } | ||
1275 | rdp->nextschedlist = NULL; | ||
1276 | rdp->nextschedtail = &rdp->nextschedlist; | ||
1277 | |||
1278 | /* Mark sleep intention. */ | ||
1279 | |||
1280 | rdp->rcu_sched_sleeping = couldsleep; | ||
1281 | |||
1282 | spin_unlock_irqrestore(&rdp->lock, flags); | ||
1283 | } | ||
1284 | |||
1285 | /* If we saw callbacks on the last scan, go deal with them. */ | ||
1286 | |||
1287 | if (!couldsleep) | ||
1288 | continue; | ||
1289 | |||
1290 | /* Attempt to block... */ | ||
1291 | |||
1292 | spin_lock_irqsave(&rcu_ctrlblk.schedlock, flags); | ||
1293 | if (rcu_ctrlblk.sched_sleep != rcu_sched_sleep_prep) { | ||
1294 | |||
1295 | /* | ||
1296 | * Someone posted a callback after we scanned. | ||
1297 | * Go take care of it. | ||
1298 | */ | ||
1299 | spin_unlock_irqrestore(&rcu_ctrlblk.schedlock, flags); | ||
1300 | couldsleepnext = 0; | ||
1301 | continue; | ||
1302 | } | ||
1303 | |||
1304 | /* Block until the next person posts a callback. */ | ||
1305 | |||
1306 | rcu_ctrlblk.sched_sleep = rcu_sched_sleeping; | ||
1307 | spin_unlock_irqrestore(&rcu_ctrlblk.schedlock, flags); | ||
1308 | ret = 0; | ||
1309 | __wait_event_interruptible(rcu_ctrlblk.sched_wq, | ||
1310 | rcu_ctrlblk.sched_sleep != rcu_sched_sleeping, | ||
1311 | ret); | ||
1312 | |||
1313 | /* | ||
1314 | * Signals would prevent us from sleeping, and we cannot | ||
1315 | * do much with them in any case. So flush them. | ||
1316 | */ | ||
1317 | if (ret) | ||
1318 | flush_signals(current); | ||
1319 | couldsleepnext = 0; | ||
1320 | |||
1321 | } while (!kthread_should_stop()); | ||
1322 | |||
1323 | return (0); | ||
1012 | } | 1324 | } |
1013 | EXPORT_SYMBOL_GPL(__synchronize_sched); | ||
1014 | 1325 | ||
1015 | /* | 1326 | /* |
1016 | * Check to see if any future RCU-related work will need to be done | 1327 | * Check to see if any future RCU-related work will need to be done |
@@ -1027,7 +1338,9 @@ int rcu_needs_cpu(int cpu) | |||
1027 | 1338 | ||
1028 | return (rdp->donelist != NULL || | 1339 | return (rdp->donelist != NULL || |
1029 | !!rdp->waitlistcount || | 1340 | !!rdp->waitlistcount || |
1030 | rdp->nextlist != NULL); | 1341 | rdp->nextlist != NULL || |
1342 | rdp->nextschedlist != NULL || | ||
1343 | rdp->waitschedlist != NULL); | ||
1031 | } | 1344 | } |
1032 | 1345 | ||
1033 | int rcu_pending(int cpu) | 1346 | int rcu_pending(int cpu) |
@@ -1038,7 +1351,9 @@ int rcu_pending(int cpu) | |||
1038 | 1351 | ||
1039 | if (rdp->donelist != NULL || | 1352 | if (rdp->donelist != NULL || |
1040 | !!rdp->waitlistcount || | 1353 | !!rdp->waitlistcount || |
1041 | rdp->nextlist != NULL) | 1354 | rdp->nextlist != NULL || |
1355 | rdp->nextschedlist != NULL || | ||
1356 | rdp->waitschedlist != NULL) | ||
1042 | return 1; | 1357 | return 1; |
1043 | 1358 | ||
1044 | /* The RCU core needs an acknowledgement from this CPU. */ | 1359 | /* The RCU core needs an acknowledgement from this CPU. */ |
@@ -1105,6 +1420,11 @@ void __init __rcu_init(void) | |||
1105 | rdp->donetail = &rdp->donelist; | 1420 | rdp->donetail = &rdp->donelist; |
1106 | rdp->rcu_flipctr[0] = 0; | 1421 | rdp->rcu_flipctr[0] = 0; |
1107 | rdp->rcu_flipctr[1] = 0; | 1422 | rdp->rcu_flipctr[1] = 0; |
1423 | rdp->nextschedlist = NULL; | ||
1424 | rdp->nextschedtail = &rdp->nextschedlist; | ||
1425 | rdp->waitschedlist = NULL; | ||
1426 | rdp->waitschedtail = &rdp->waitschedlist; | ||
1427 | rdp->rcu_sched_sleeping = 0; | ||
1108 | } | 1428 | } |
1109 | register_cpu_notifier(&rcu_nb); | 1429 | register_cpu_notifier(&rcu_nb); |
1110 | 1430 | ||
@@ -1127,11 +1447,15 @@ void __init __rcu_init(void) | |||
1127 | } | 1447 | } |
1128 | 1448 | ||
1129 | /* | 1449 | /* |
1130 | * Deprecated, use synchronize_rcu() or synchronize_sched() instead. | 1450 | * Late-boot-time RCU initialization that must wait until after the scheduler |
1451 | * has been initialized. | ||
1131 | */ | 1452 | */ |
1132 | void synchronize_kernel(void) | 1453 | void __init rcu_init_sched(void) |
1133 | { | 1454 | { |
1134 | synchronize_rcu(); | 1455 | rcu_sched_grace_period_task = kthread_run(rcu_sched_grace_period, |
1456 | NULL, | ||
1457 | "rcu_sched_grace_period"); | ||
1458 | WARN_ON(IS_ERR(rcu_sched_grace_period_task)); | ||
1135 | } | 1459 | } |
1136 | 1460 | ||
1137 | #ifdef CONFIG_RCU_TRACE | 1461 | #ifdef CONFIG_RCU_TRACE |
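As a usage note, the call_rcu_sched() added above gives updaters a callback analogue of synchronize_sched(): the callback runs only after every CPU has passed through a quiescent state, i.e. after all preempt-disable and irq-disable regions that were in flight at posting time have completed. A hypothetical caller (struct demo_node and its helpers are made up for illustration):

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_node {			/* hypothetical structure */
	struct rcu_head rcu;
	int payload;
};

static void demo_node_free(struct rcu_head *head)
{
	kfree(container_of(head, struct demo_node, rcu));
}

static void demo_node_retire(struct demo_node *p)
{
	/* ... first unpublish p so new preempt-disabled readers miss it ... */
	call_rcu_sched(&p->rcu, demo_node_free);
}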
diff --git a/kernel/rcupreempt_trace.c b/kernel/rcupreempt_trace.c
index 49ac4947af2..5edf82c34bb 100644
--- a/kernel/rcupreempt_trace.c
+++ b/kernel/rcupreempt_trace.c
@@ -38,7 +38,6 @@ | |||
38 | #include <linux/moduleparam.h> | 38 | #include <linux/moduleparam.h> |
39 | #include <linux/percpu.h> | 39 | #include <linux/percpu.h> |
40 | #include <linux/notifier.h> | 40 | #include <linux/notifier.h> |
41 | #include <linux/rcupdate.h> | ||
42 | #include <linux/cpu.h> | 41 | #include <linux/cpu.h> |
43 | #include <linux/mutex.h> | 42 | #include <linux/mutex.h> |
44 | #include <linux/rcupreempt_trace.h> | 43 | #include <linux/rcupreempt_trace.h> |
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 33acc424667..27003e2421c 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -57,7 +57,8 @@ static int stat_interval; /* Interval between stats, in seconds. */ | |||
57 | /* Defaults to "only at end of test". */ | 57 | /* Defaults to "only at end of test". */ |
58 | static int verbose; /* Print more debug info. */ | 58 | static int verbose; /* Print more debug info. */ |
59 | static int test_no_idle_hz; /* Test RCU's support for tickless idle CPUs. */ | 59 | static int test_no_idle_hz; /* Test RCU's support for tickless idle CPUs. */ |
60 | static int shuffle_interval = 5; /* Interval between shuffles (in sec)*/ | 60 | static int shuffle_interval = 3; /* Interval between shuffles (in sec)*/ |
61 | static int stutter = 5; /* Start/stop testing interval (in sec) */ | ||
61 | static char *torture_type = "rcu"; /* What RCU implementation to torture. */ | 62 | static char *torture_type = "rcu"; /* What RCU implementation to torture. */ |
62 | 63 | ||
63 | module_param(nreaders, int, 0444); | 64 | module_param(nreaders, int, 0444); |
@@ -72,6 +73,8 @@ module_param(test_no_idle_hz, bool, 0444); | |||
72 | MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs"); | 73 | MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs"); |
73 | module_param(shuffle_interval, int, 0444); | 74 | module_param(shuffle_interval, int, 0444); |
74 | MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles"); | 75 | MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles"); |
76 | module_param(stutter, int, 0444); | ||
77 | MODULE_PARM_DESC(stutter, "Number of seconds to run/halt test"); | ||
75 | module_param(torture_type, charp, 0444); | 78 | module_param(torture_type, charp, 0444); |
76 | MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, srcu)"); | 79 | MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, srcu)"); |
77 | 80 | ||
@@ -91,6 +94,7 @@ static struct task_struct **fakewriter_tasks; | |||
91 | static struct task_struct **reader_tasks; | 94 | static struct task_struct **reader_tasks; |
92 | static struct task_struct *stats_task; | 95 | static struct task_struct *stats_task; |
93 | static struct task_struct *shuffler_task; | 96 | static struct task_struct *shuffler_task; |
97 | static struct task_struct *stutter_task; | ||
94 | 98 | ||
95 | #define RCU_TORTURE_PIPE_LEN 10 | 99 | #define RCU_TORTURE_PIPE_LEN 10 |
96 | 100 | ||
@@ -119,6 +123,15 @@ static atomic_t n_rcu_torture_mberror; | |||
119 | static atomic_t n_rcu_torture_error; | 123 | static atomic_t n_rcu_torture_error; |
120 | static struct list_head rcu_torture_removed; | 124 | static struct list_head rcu_torture_removed; |
121 | 125 | ||
126 | static int stutter_pause_test = 0; | ||
127 | |||
128 | #if defined(MODULE) || defined(CONFIG_RCU_TORTURE_TEST_RUNNABLE) | ||
129 | #define RCUTORTURE_RUNNABLE_INIT 1 | ||
130 | #else | ||
131 | #define RCUTORTURE_RUNNABLE_INIT 0 | ||
132 | #endif | ||
133 | int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT; | ||
134 | |||
122 | /* | 135 | /* |
123 | * Allocate an element from the rcu_tortures pool. | 136 | * Allocate an element from the rcu_tortures pool. |
124 | */ | 137 | */ |
@@ -179,6 +192,13 @@ rcu_random(struct rcu_random_state *rrsp) | |||
179 | return swahw32(rrsp->rrs_state); | 192 | return swahw32(rrsp->rrs_state); |
180 | } | 193 | } |
181 | 194 | ||
195 | static void | ||
196 | rcu_stutter_wait(void) | ||
197 | { | ||
198 | while (stutter_pause_test || !rcutorture_runnable) | ||
199 | schedule_timeout_interruptible(1); | ||
200 | } | ||
201 | |||
182 | /* | 202 | /* |
183 | * Operations vector for selecting different types of tests. | 203 | * Operations vector for selecting different types of tests. |
184 | */ | 204 | */ |
@@ -192,6 +212,7 @@ struct rcu_torture_ops { | |||
192 | int (*completed)(void); | 212 | int (*completed)(void); |
193 | void (*deferredfree)(struct rcu_torture *p); | 213 | void (*deferredfree)(struct rcu_torture *p); |
194 | void (*sync)(void); | 214 | void (*sync)(void); |
215 | void (*cb_barrier)(void); | ||
195 | int (*stats)(char *page); | 216 | int (*stats)(char *page); |
196 | char *name; | 217 | char *name; |
197 | }; | 218 | }; |
@@ -265,6 +286,7 @@ static struct rcu_torture_ops rcu_ops = { | |||
265 | .completed = rcu_torture_completed, | 286 | .completed = rcu_torture_completed, |
266 | .deferredfree = rcu_torture_deferred_free, | 287 | .deferredfree = rcu_torture_deferred_free, |
267 | .sync = synchronize_rcu, | 288 | .sync = synchronize_rcu, |
289 | .cb_barrier = rcu_barrier, | ||
268 | .stats = NULL, | 290 | .stats = NULL, |
269 | .name = "rcu" | 291 | .name = "rcu" |
270 | }; | 292 | }; |
@@ -304,6 +326,7 @@ static struct rcu_torture_ops rcu_sync_ops = { | |||
304 | .completed = rcu_torture_completed, | 326 | .completed = rcu_torture_completed, |
305 | .deferredfree = rcu_sync_torture_deferred_free, | 327 | .deferredfree = rcu_sync_torture_deferred_free, |
306 | .sync = synchronize_rcu, | 328 | .sync = synchronize_rcu, |
329 | .cb_barrier = NULL, | ||
307 | .stats = NULL, | 330 | .stats = NULL, |
308 | .name = "rcu_sync" | 331 | .name = "rcu_sync" |
309 | }; | 332 | }; |
@@ -364,6 +387,7 @@ static struct rcu_torture_ops rcu_bh_ops = { | |||
364 | .completed = rcu_bh_torture_completed, | 387 | .completed = rcu_bh_torture_completed, |
365 | .deferredfree = rcu_bh_torture_deferred_free, | 388 | .deferredfree = rcu_bh_torture_deferred_free, |
366 | .sync = rcu_bh_torture_synchronize, | 389 | .sync = rcu_bh_torture_synchronize, |
390 | .cb_barrier = rcu_barrier_bh, | ||
367 | .stats = NULL, | 391 | .stats = NULL, |
368 | .name = "rcu_bh" | 392 | .name = "rcu_bh" |
369 | }; | 393 | }; |
@@ -377,6 +401,7 @@ static struct rcu_torture_ops rcu_bh_sync_ops = { | |||
377 | .completed = rcu_bh_torture_completed, | 401 | .completed = rcu_bh_torture_completed, |
378 | .deferredfree = rcu_sync_torture_deferred_free, | 402 | .deferredfree = rcu_sync_torture_deferred_free, |
379 | .sync = rcu_bh_torture_synchronize, | 403 | .sync = rcu_bh_torture_synchronize, |
404 | .cb_barrier = NULL, | ||
380 | .stats = NULL, | 405 | .stats = NULL, |
381 | .name = "rcu_bh_sync" | 406 | .name = "rcu_bh_sync" |
382 | }; | 407 | }; |
@@ -458,6 +483,7 @@ static struct rcu_torture_ops srcu_ops = { | |||
458 | .completed = srcu_torture_completed, | 483 | .completed = srcu_torture_completed, |
459 | .deferredfree = rcu_sync_torture_deferred_free, | 484 | .deferredfree = rcu_sync_torture_deferred_free, |
460 | .sync = srcu_torture_synchronize, | 485 | .sync = srcu_torture_synchronize, |
486 | .cb_barrier = NULL, | ||
461 | .stats = srcu_torture_stats, | 487 | .stats = srcu_torture_stats, |
462 | .name = "srcu" | 488 | .name = "srcu" |
463 | }; | 489 | }; |
@@ -482,6 +508,11 @@ static int sched_torture_completed(void) | |||
482 | return 0; | 508 | return 0; |
483 | } | 509 | } |
484 | 510 | ||
511 | static void rcu_sched_torture_deferred_free(struct rcu_torture *p) | ||
512 | { | ||
513 | call_rcu_sched(&p->rtort_rcu, rcu_torture_cb); | ||
514 | } | ||
515 | |||
485 | static void sched_torture_synchronize(void) | 516 | static void sched_torture_synchronize(void) |
486 | { | 517 | { |
487 | synchronize_sched(); | 518 | synchronize_sched(); |
@@ -494,12 +525,27 @@ static struct rcu_torture_ops sched_ops = { | |||
494 | .readdelay = rcu_read_delay, /* just reuse rcu's version. */ | 525 | .readdelay = rcu_read_delay, /* just reuse rcu's version. */ |
495 | .readunlock = sched_torture_read_unlock, | 526 | .readunlock = sched_torture_read_unlock, |
496 | .completed = sched_torture_completed, | 527 | .completed = sched_torture_completed, |
497 | .deferredfree = rcu_sync_torture_deferred_free, | 528 | .deferredfree = rcu_sched_torture_deferred_free, |
498 | .sync = sched_torture_synchronize, | 529 | .sync = sched_torture_synchronize, |
530 | .cb_barrier = rcu_barrier_sched, | ||
499 | .stats = NULL, | 531 | .stats = NULL, |
500 | .name = "sched" | 532 | .name = "sched" |
501 | }; | 533 | }; |
502 | 534 | ||
535 | static struct rcu_torture_ops sched_ops_sync = { | ||
536 | .init = rcu_sync_torture_init, | ||
537 | .cleanup = NULL, | ||
538 | .readlock = sched_torture_read_lock, | ||
539 | .readdelay = rcu_read_delay, /* just reuse rcu's version. */ | ||
540 | .readunlock = sched_torture_read_unlock, | ||
541 | .completed = sched_torture_completed, | ||
542 | .deferredfree = rcu_sync_torture_deferred_free, | ||
543 | .sync = sched_torture_synchronize, | ||
544 | .cb_barrier = NULL, | ||
545 | .stats = NULL, | ||
546 | .name = "sched_sync" | ||
547 | }; | ||
548 | |||
503 | /* | 549 | /* |
504 | * RCU torture writer kthread. Repeatedly substitutes a new structure | 550 | * RCU torture writer kthread. Repeatedly substitutes a new structure |
505 | * for that pointed to by rcu_torture_current, freeing the old structure | 551 | * for that pointed to by rcu_torture_current, freeing the old structure |
@@ -537,6 +583,7 @@ rcu_torture_writer(void *arg) | |||
537 | } | 583 | } |
538 | rcu_torture_current_version++; | 584 | rcu_torture_current_version++; |
539 | oldbatch = cur_ops->completed(); | 585 | oldbatch = cur_ops->completed(); |
586 | rcu_stutter_wait(); | ||
540 | } while (!kthread_should_stop() && !fullstop); | 587 | } while (!kthread_should_stop() && !fullstop); |
541 | VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping"); | 588 | VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping"); |
542 | while (!kthread_should_stop()) | 589 | while (!kthread_should_stop()) |
@@ -560,6 +607,7 @@ rcu_torture_fakewriter(void *arg) | |||
560 | schedule_timeout_uninterruptible(1 + rcu_random(&rand)%10); | 607 | schedule_timeout_uninterruptible(1 + rcu_random(&rand)%10); |
561 | udelay(rcu_random(&rand) & 0x3ff); | 608 | udelay(rcu_random(&rand) & 0x3ff); |
562 | cur_ops->sync(); | 609 | cur_ops->sync(); |
610 | rcu_stutter_wait(); | ||
563 | } while (!kthread_should_stop() && !fullstop); | 611 | } while (!kthread_should_stop() && !fullstop); |
564 | 612 | ||
565 | VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping"); | 613 | VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping"); |
@@ -615,6 +663,7 @@ rcu_torture_reader(void *arg) | |||
615 | preempt_enable(); | 663 | preempt_enable(); |
616 | cur_ops->readunlock(idx); | 664 | cur_ops->readunlock(idx); |
617 | schedule(); | 665 | schedule(); |
666 | rcu_stutter_wait(); | ||
618 | } while (!kthread_should_stop() && !fullstop); | 667 | } while (!kthread_should_stop() && !fullstop); |
619 | VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping"); | 668 | VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping"); |
620 | while (!kthread_should_stop()) | 669 | while (!kthread_should_stop()) |
@@ -661,6 +710,7 @@ rcu_torture_printk(char *page) | |||
661 | if (i > 1) { | 710 | if (i > 1) { |
662 | cnt += sprintf(&page[cnt], "!!! "); | 711 | cnt += sprintf(&page[cnt], "!!! "); |
663 | atomic_inc(&n_rcu_torture_error); | 712 | atomic_inc(&n_rcu_torture_error); |
713 | WARN_ON_ONCE(1); | ||
664 | } | 714 | } |
665 | cnt += sprintf(&page[cnt], "Reader Pipe: "); | 715 | cnt += sprintf(&page[cnt], "Reader Pipe: "); |
666 | for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) | 716 | for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) |
@@ -785,15 +835,34 @@ rcu_torture_shuffle(void *arg) | |||
785 | return 0; | 835 | return 0; |
786 | } | 836 | } |
787 | 837 | ||
838 | /* Cause the rcutorture test to "stutter", starting and stopping all | ||
839 | * threads periodically. | ||
840 | */ | ||
841 | static int | ||
842 | rcu_torture_stutter(void *arg) | ||
843 | { | ||
844 | VERBOSE_PRINTK_STRING("rcu_torture_stutter task started"); | ||
845 | do { | ||
846 | schedule_timeout_interruptible(stutter * HZ); | ||
847 | stutter_pause_test = 1; | ||
848 | if (!kthread_should_stop()) | ||
849 | schedule_timeout_interruptible(stutter * HZ); | ||
850 | stutter_pause_test = 0; | ||
851 | } while (!kthread_should_stop()); | ||
852 | VERBOSE_PRINTK_STRING("rcu_torture_stutter task stopping"); | ||
853 | return 0; | ||
854 | } | ||
855 | |||
788 | static inline void | 856 | static inline void |
789 | rcu_torture_print_module_parms(char *tag) | 857 | rcu_torture_print_module_parms(char *tag) |
790 | { | 858 | { |
791 | printk(KERN_ALERT "%s" TORTURE_FLAG | 859 | printk(KERN_ALERT "%s" TORTURE_FLAG |
792 | "--- %s: nreaders=%d nfakewriters=%d " | 860 | "--- %s: nreaders=%d nfakewriters=%d " |
793 | "stat_interval=%d verbose=%d test_no_idle_hz=%d " | 861 | "stat_interval=%d verbose=%d test_no_idle_hz=%d " |
794 | "shuffle_interval = %d\n", | 862 | "shuffle_interval=%d stutter=%d\n", |
795 | torture_type, tag, nrealreaders, nfakewriters, | 863 | torture_type, tag, nrealreaders, nfakewriters, |
796 | stat_interval, verbose, test_no_idle_hz, shuffle_interval); | 864 | stat_interval, verbose, test_no_idle_hz, shuffle_interval, |
865 | stutter); | ||
797 | } | 866 | } |
798 | 867 | ||
799 | static void | 868 | static void |
@@ -802,6 +871,11 @@ rcu_torture_cleanup(void) | |||
802 | int i; | 871 | int i; |
803 | 872 | ||
804 | fullstop = 1; | 873 | fullstop = 1; |
874 | if (stutter_task) { | ||
875 | VERBOSE_PRINTK_STRING("Stopping rcu_torture_stutter task"); | ||
876 | kthread_stop(stutter_task); | ||
877 | } | ||
878 | stutter_task = NULL; | ||
805 | if (shuffler_task) { | 879 | if (shuffler_task) { |
806 | VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task"); | 880 | VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task"); |
807 | kthread_stop(shuffler_task); | 881 | kthread_stop(shuffler_task); |
@@ -848,7 +922,9 @@ rcu_torture_cleanup(void) | |||
848 | stats_task = NULL; | 922 | stats_task = NULL; |
849 | 923 | ||
850 | /* Wait for all RCU callbacks to fire. */ | 924 | /* Wait for all RCU callbacks to fire. */ |
851 | rcu_barrier(); | 925 | |
926 | if (cur_ops->cb_barrier != NULL) | ||
927 | cur_ops->cb_barrier(); | ||
852 | 928 | ||
853 | rcu_torture_stats_print(); /* -After- the stats thread is stopped! */ | 929 | rcu_torture_stats_print(); /* -After- the stats thread is stopped! */ |
854 | 930 | ||
@@ -868,7 +944,7 @@ rcu_torture_init(void) | |||
868 | int firsterr = 0; | 944 | int firsterr = 0; |
869 | static struct rcu_torture_ops *torture_ops[] = | 945 | static struct rcu_torture_ops *torture_ops[] = |
870 | { &rcu_ops, &rcu_sync_ops, &rcu_bh_ops, &rcu_bh_sync_ops, | 946 | { &rcu_ops, &rcu_sync_ops, &rcu_bh_ops, &rcu_bh_sync_ops, |
871 | &srcu_ops, &sched_ops, }; | 947 | &srcu_ops, &sched_ops, &sched_ops_sync, }; |
872 | 948 | ||
873 | /* Process args and tell the world that the torturer is on the job. */ | 949 | /* Process args and tell the world that the torturer is on the job. */ |
874 | for (i = 0; i < ARRAY_SIZE(torture_ops); i++) { | 950 | for (i = 0; i < ARRAY_SIZE(torture_ops); i++) { |
@@ -988,6 +1064,19 @@ rcu_torture_init(void) | |||
988 | goto unwind; | 1064 | goto unwind; |
989 | } | 1065 | } |
990 | } | 1066 | } |
1067 | if (stutter < 0) | ||
1068 | stutter = 0; | ||
1069 | if (stutter) { | ||
1070 | /* Create the stutter thread */ | ||
1071 | stutter_task = kthread_run(rcu_torture_stutter, NULL, | ||
1072 | "rcu_torture_stutter"); | ||
1073 | if (IS_ERR(stutter_task)) { | ||
1074 | firsterr = PTR_ERR(stutter_task); | ||
1075 | VERBOSE_PRINTK_ERRSTRING("Failed to create stutter"); | ||
1076 | stutter_task = NULL; | ||
1077 | goto unwind; | ||
1078 | } | ||
1079 | } | ||
991 | return 0; | 1080 | return 0; |
992 | 1081 | ||
993 | unwind: | 1082 | unwind: |
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 29116652dca..c6887cf135c 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -82,6 +82,9 @@ extern int maps_protect; | |||
82 | extern int sysctl_stat_interval; | 82 | extern int sysctl_stat_interval; |
83 | extern int latencytop_enabled; | 83 | extern int latencytop_enabled; |
84 | extern int sysctl_nr_open_min, sysctl_nr_open_max; | 84 | extern int sysctl_nr_open_min, sysctl_nr_open_max; |
85 | #ifdef CONFIG_RCU_TORTURE_TEST | ||
86 | extern int rcutorture_runnable; | ||
87 | #endif /* #ifdef CONFIG_RCU_TORTURE_TEST */ | ||
85 | 88 | ||
86 | /* Constants used for minimum and maximum */ | 89 | /* Constants used for minimum and maximum */ |
87 | #if defined(CONFIG_DETECT_SOFTLOCKUP) || defined(CONFIG_HIGHMEM) | 90 | #if defined(CONFIG_DETECT_SOFTLOCKUP) || defined(CONFIG_HIGHMEM) |
@@ -813,6 +816,16 @@ static struct ctl_table kern_table[] = { | |||
813 | .child = key_sysctls, | 816 | .child = key_sysctls, |
814 | }, | 817 | }, |
815 | #endif | 818 | #endif |
819 | #ifdef CONFIG_RCU_TORTURE_TEST | ||
820 | { | ||
821 | .ctl_name = CTL_UNNUMBERED, | ||
822 | .procname = "rcutorture_runnable", | ||
823 | .data = &rcutorture_runnable, | ||
824 | .maxlen = sizeof(int), | ||
825 | .mode = 0644, | ||
826 | .proc_handler = &proc_dointvec, | ||
827 | }, | ||
828 | #endif | ||
816 | /* | 829 | /* |
817 | * NOTE: do not add new entries to this table unless you have read | 830 | * NOTE: do not add new entries to this table unless you have read |
818 | * Documentation/sysctl/ctl_unnumbered.txt | 831 | * Documentation/sysctl/ctl_unnumbered.txt |