| | | |
|---|---|---|
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-10-01 13:16:42 -0400 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-10-01 13:16:42 -0400 |
| commit | 620e77533f29796df7aff861e79bd72e08554ebb (patch) | |
| tree | 844afce2333549bc5b8d7dc87a4875b9216a0023 /kernel/rcutiny.c | |
| parent | 6977b4c7736e8809b7959c66875a16c0bbcf2152 (diff) | |
| parent | fa34da708cbe1e2d9a2ee7fc68ea8fccbf095d12 (diff) | |
Merge branch 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull RCU changes from Ingo Molnar:
0. 'idle RCU':
Adds RCU APIs that allow non-idle tasks to enter RCU idle mode and
provides x86 code to make use of them, allowing RCU to treat
user-mode execution as an extended quiescent state when the new
RCU_USER_QS kernel configuration parameter is specified; a sketch
of the intended call sites appears after this list. (Work is in
progress to port this to a few other architectures, but that work
is not part of this series.)
1. A fix for a latent bug that has been in RCU ever since the addition
of CPU stall warnings. This bug results in false-positive stall
warnings, but thus far only on embedded systems with severely
cut-down userspace configurations.
2. Further reductions in latency spikes for huge systems, along with
additional boot-time adaptation to the actual hardware.
This is a large change, as it moves RCU grace-period initialization
and cleanup, along with quiescent-state forcing, from softirq to a
kthread. However, it appears to be in quite good shape (famous
last words).
3. Updates to documentation and rcutorture, the latter category
including keeping statistics on CPU-hotplug latencies and fixing
some initialization-time races.
4. CPU-hotplug fixes and improvements.
5. Idle-loop fixes that were omitted on an earlier submission.
6. Miscellaneous fixes and improvements.
In certain RCU configurations, new kernel threads (rcu_bh, rcu_sched)
will show up, making the RCU processing overhead visible.
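Item 0 is easiest to picture at the user/kernel boundary. As the "Syscall hooks" and "Exception hooks" commits in the shortlog below suggest, the architecture code brackets user-mode execution with the new APIs. The following is a minimal sketch only: `arch_enter_from_user()` and `arch_return_to_user()` are made-up stand-ins for the real x86 entry points added in this series, and the header location is assumed.

```c
/*
 * Illustration only: where the new RCU_USER_QS hooks are meant to sit.
 * arch_enter_from_user() / arch_return_to_user() are hypothetical names
 * standing in for the actual x86 entry/exit hooks.
 */
#include <linux/rcupdate.h>

void arch_enter_from_user(void)
{
        rcu_user_exit();   /* back in the kernel: RCU must watch this CPU again */
        /* ... handle the syscall, exception, or interrupt ... */
}

void arch_return_to_user(void)
{
        /* ... finish return-to-user work ... */
        rcu_user_enter();  /* user mode becomes an extended quiescent state */
}
```

With these calls in place, a CPU spinning in userspace no longer needs the scheduling-clock tick to report quiescent states, which is what lets RCU treat user execution like idle.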
* 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (90 commits)
rcu: Apply micro-optimization and int/bool fixes to RCU's idle handling
rcu: Userspace RCU extended QS selftest
x86: Exit RCU extended QS on notify resume
x86: Use the new schedule_user API on userspace preemption
rcu: Exit RCU extended QS on user preemption
rcu: Exit RCU extended QS on kernel preemption after irq/exception
x86: Exception hooks for userspace RCU extended QS
x86: Unspaghettize do_general_protection()
x86: Syscall hooks for userspace RCU extended QS
rcu: Switch task's syscall hooks on context switch
rcu: Ignore userspace extended quiescent state by default
rcu: Allow rcu_user_enter()/exit() to nest
rcu: Settle config for userspace extended quiescent state
rcu: Make RCU_FAST_NO_HZ handle adaptive ticks
rcu: New rcu_user_enter_after_irq() and rcu_user_exit_after_irq() APIs
rcu: New rcu_user_enter() and rcu_user_exit() APIs
ia64: Add missing RCU idle APIs on idle loop
xtensa: Add missing RCU idle APIs on idle loop
score: Add missing RCU idle APIs on idle loop
parisc: Add missing RCU idle APIs on idle loop
...
Diffstat (limited to 'kernel/rcutiny.c')
-rw-r--r--  kernel/rcutiny.c | 33
1 file changed, 18 insertions(+), 15 deletions(-)
```diff
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
index 547b1fe5b052..e4c6a598d6f7 100644
--- a/kernel/rcutiny.c
+++ b/kernel/rcutiny.c
@@ -56,25 +56,28 @@ static void __call_rcu(struct rcu_head *head,
 static long long rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
 
 /* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcutree.c. */
-static void rcu_idle_enter_common(long long oldval)
+static void rcu_idle_enter_common(long long newval)
 {
-        if (rcu_dynticks_nesting) {
+        if (newval) {
                 RCU_TRACE(trace_rcu_dyntick("--=",
-                                            oldval, rcu_dynticks_nesting));
+                                            rcu_dynticks_nesting, newval));
+                rcu_dynticks_nesting = newval;
                 return;
         }
-        RCU_TRACE(trace_rcu_dyntick("Start", oldval, rcu_dynticks_nesting));
+        RCU_TRACE(trace_rcu_dyntick("Start", rcu_dynticks_nesting, newval));
         if (!is_idle_task(current)) {
                 struct task_struct *idle = idle_task(smp_processor_id());
 
                 RCU_TRACE(trace_rcu_dyntick("Error on entry: not idle task",
-                                            oldval, rcu_dynticks_nesting));
+                                            rcu_dynticks_nesting, newval));
                 ftrace_dump(DUMP_ALL);
                 WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
                           current->pid, current->comm,
                           idle->pid, idle->comm); /* must be idle task! */
         }
         rcu_sched_qs(0); /* implies rcu_bh_qsctr_inc(0) */
+        barrier();
+        rcu_dynticks_nesting = newval;
 }
 
 /*
@@ -84,17 +87,16 @@ static void rcu_idle_enter_common(long long oldval)
 void rcu_idle_enter(void)
 {
         unsigned long flags;
-        long long oldval;
+        long long newval;
 
         local_irq_save(flags);
-        oldval = rcu_dynticks_nesting;
         WARN_ON_ONCE((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) == 0);
         if ((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) ==
             DYNTICK_TASK_NEST_VALUE)
-                rcu_dynticks_nesting = 0;
+                newval = 0;
         else
-                rcu_dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
-        rcu_idle_enter_common(oldval);
+                newval = rcu_dynticks_nesting - DYNTICK_TASK_NEST_VALUE;
+        rcu_idle_enter_common(newval);
         local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(rcu_idle_enter);
@@ -105,15 +107,15 @@ EXPORT_SYMBOL_GPL(rcu_idle_enter);
 void rcu_irq_exit(void)
 {
         unsigned long flags;
-        long long oldval;
+        long long newval;
 
         local_irq_save(flags);
-        oldval = rcu_dynticks_nesting;
-        rcu_dynticks_nesting--;
-        WARN_ON_ONCE(rcu_dynticks_nesting < 0);
-        rcu_idle_enter_common(oldval);
+        newval = rcu_dynticks_nesting - 1;
+        WARN_ON_ONCE(newval < 0);
+        rcu_idle_enter_common(newval);
         local_irq_restore(flags);
 }
+EXPORT_SYMBOL_GPL(rcu_irq_exit);
 
 /* Common code for rcu_idle_exit() and rcu_irq_enter(), see kernel/rcutree.c. */
 static void rcu_idle_exit_common(long long oldval)
@@ -171,6 +173,7 @@ void rcu_irq_enter(void)
         rcu_idle_exit_common(oldval);
         local_irq_restore(flags);
 }
+EXPORT_SYMBOL_GPL(rcu_irq_enter);
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 
```
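One way to read the rcutiny.c change above: the helpers now receive the *new* nesting value instead of the old one, so tracing can report both old and new values, and the store to rcu_dynticks_nesting is committed only after the quiescent state is recorded, with barrier() keeping the compiler from moving the store earlier. The following is a standalone, illustrative sketch of that compute-then-commit shape in plain C; all names (nesting, idle_enter_common, record_quiescent_state) are made up for the example and are not kernel APIs.

```c
#include <stdio.h>

static long long nesting = 1;   /* plays the role of rcu_dynticks_nesting */

static void record_quiescent_state(void)   /* stand-in for rcu_sched_qs(0) */
{
        printf("quiescent state recorded\n");
}

/*
 * Shaped like the patched rcu_idle_enter_common(): the caller computes
 * the new value, both old and new remain visible for tracing, and the
 * store happens last.
 */
static void idle_enter_common(long long newval)
{
        if (newval) {   /* still nested: trace and commit, no QS yet */
                printf("--= old=%lld new=%lld\n", nesting, newval);
                nesting = newval;
                return;
        }
        printf("Start old=%lld new=%lld\n", nesting, newval);
        record_quiescent_state();
        __asm__ __volatile__("" ::: "memory");  /* compiler barrier, like barrier() */
        nesting = newval;   /* commit only after the QS is recorded */
}

int main(void)
{
        idle_enter_common(nesting - 1);   /* analogous to rcu_irq_exit() */
        return 0;
}
```

The same shape appears in all three call sites in the diff: rcu_idle_enter() and rcu_irq_exit() now compute newval locally and pass it down rather than mutating the counter before the common code runs.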
