Diffstat (limited to 'kernel/rcu/tiny.c')
 -rw-r--r--  kernel/rcu/tiny.c | 113
 1 file changed, 10 insertions(+), 103 deletions(-)
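This patch strips the dyntick-idle bookkeeping out of Tiny RCU: the rcu_dynticks_nesting counter and its tracing helpers are deleted, the idle/irq entry and exit hooks become empty, and __rcu_is_watching() unconditionally reports true. Reconstructed from the "+" side of the hunks below, the surviving hooks reduce to:

	void rcu_idle_enter(void) { }
	void rcu_irq_exit(void)   { }
	void rcu_idle_exit(void)  { }
	void rcu_irq_enter(void)  { }

	bool notrace __rcu_is_watching(void)
	{
		return true;	/* Tiny RCU no longer tracks dyntick-idle state */
	}

Grace periods still complete when a callback is posted from the idle loop because __call_rcu() now forces a reschedule; see the second-to-last hunk.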
diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
index 0db5649f8817..cc9ceca7bde1 100644
--- a/kernel/rcu/tiny.c
+++ b/kernel/rcu/tiny.c
@@ -47,54 +47,14 @@ static void __call_rcu(struct rcu_head *head,
 		       void (*func)(struct rcu_head *rcu),
 		       struct rcu_ctrlblk *rcp);
 
-static long long rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
-
 #include "tiny_plugin.h"
 
-/* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcu/tree.c. */
-static void rcu_idle_enter_common(long long newval)
-{
-	if (newval) {
-		RCU_TRACE(trace_rcu_dyntick(TPS("--="),
-					    rcu_dynticks_nesting, newval));
-		rcu_dynticks_nesting = newval;
-		return;
-	}
-	RCU_TRACE(trace_rcu_dyntick(TPS("Start"),
-				    rcu_dynticks_nesting, newval));
-	if (IS_ENABLED(CONFIG_RCU_TRACE) && !is_idle_task(current)) {
-		struct task_struct *idle __maybe_unused = idle_task(smp_processor_id());
-
-		RCU_TRACE(trace_rcu_dyntick(TPS("Entry error: not idle task"),
-					    rcu_dynticks_nesting, newval));
-		ftrace_dump(DUMP_ALL);
-		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
-			  current->pid, current->comm,
-			  idle->pid, idle->comm); /* must be idle task! */
-	}
-	rcu_sched_qs(); /* implies rcu_bh_inc() */
-	barrier();
-	rcu_dynticks_nesting = newval;
-}
-
 /*
  * Enter idle, which is an extended quiescent state if we have fully
- * entered that mode (i.e., if the new value of dynticks_nesting is zero).
+ * entered that mode.
  */
 void rcu_idle_enter(void)
 {
-	unsigned long flags;
-	long long newval;
-
-	local_irq_save(flags);
-	WARN_ON_ONCE((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) == 0);
-	if ((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) ==
-	    DYNTICK_TASK_NEST_VALUE)
-		newval = 0;
-	else
-		newval = rcu_dynticks_nesting - DYNTICK_TASK_NEST_VALUE;
-	rcu_idle_enter_common(newval);
-	local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(rcu_idle_enter);
 
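For reference, the counter deleted above packed two nesting counts into a single long long: low-order bits for irq nesting, plus a high-order field and flag bit for task-level idle entry. The DYNTICK_TASK_* constants come from kernel/rcu/rcu.h; the definitions below are quoted from memory of this era's tree, so treat them as illustrative context rather than an authoritative excerpt:

	#define DYNTICK_TASK_NEST_WIDTH 7
	#define DYNTICK_TASK_NEST_VALUE ((LLONG_MAX >> DYNTICK_TASK_NEST_WIDTH) + 1)
	#define DYNTICK_TASK_NEST_MASK  (LLONG_MAX - DYNTICK_TASK_NEST_VALUE + 1)
	#define DYNTICK_TASK_FLAG       ((DYNTICK_TASK_NEST_VALUE / 8) * 2)
	#define DYNTICK_TASK_EXIT_IDLE  (DYNTICK_TASK_NEST_VALUE + DYNTICK_TASK_FLAG)

None of that arithmetic is needed once Tiny RCU stops distinguishing idle from non-idle.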
@@ -103,55 +63,14 @@ EXPORT_SYMBOL_GPL(rcu_idle_enter);
  */
 void rcu_irq_exit(void)
 {
-	unsigned long flags;
-	long long newval;
-
-	local_irq_save(flags);
-	newval = rcu_dynticks_nesting - 1;
-	WARN_ON_ONCE(newval < 0);
-	rcu_idle_enter_common(newval);
-	local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(rcu_irq_exit);
 
-/* Common code for rcu_idle_exit() and rcu_irq_enter(), see kernel/rcu/tree.c. */
-static void rcu_idle_exit_common(long long oldval)
-{
-	if (oldval) {
-		RCU_TRACE(trace_rcu_dyntick(TPS("++="),
-					    oldval, rcu_dynticks_nesting));
-		return;
-	}
-	RCU_TRACE(trace_rcu_dyntick(TPS("End"), oldval, rcu_dynticks_nesting));
-	if (IS_ENABLED(CONFIG_RCU_TRACE) && !is_idle_task(current)) {
-		struct task_struct *idle __maybe_unused = idle_task(smp_processor_id());
-
-		RCU_TRACE(trace_rcu_dyntick(TPS("Exit error: not idle task"),
-					    oldval, rcu_dynticks_nesting));
-		ftrace_dump(DUMP_ALL);
-		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
-			  current->pid, current->comm,
-			  idle->pid, idle->comm); /* must be idle task! */
-	}
-}
-
 /*
  * Exit idle, so that we are no longer in an extended quiescent state.
  */
 void rcu_idle_exit(void)
 {
-	unsigned long flags;
-	long long oldval;
-
-	local_irq_save(flags);
-	oldval = rcu_dynticks_nesting;
-	WARN_ON_ONCE(rcu_dynticks_nesting < 0);
-	if (rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK)
-		rcu_dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
-	else
-		rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
-	rcu_idle_exit_common(oldval);
-	local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(rcu_idle_exit);
 
@@ -160,15 +79,6 @@ EXPORT_SYMBOL_GPL(rcu_idle_exit);
  */
 void rcu_irq_enter(void)
 {
-	unsigned long flags;
-	long long oldval;
-
-	local_irq_save(flags);
-	oldval = rcu_dynticks_nesting;
-	rcu_dynticks_nesting++;
-	WARN_ON_ONCE(rcu_dynticks_nesting == 0);
-	rcu_idle_exit_common(oldval);
-	local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(rcu_irq_enter);
 
@@ -179,23 +89,13 @@ EXPORT_SYMBOL_GPL(rcu_irq_enter);
  */
 bool notrace __rcu_is_watching(void)
 {
-	return rcu_dynticks_nesting;
+	return true;
 }
 EXPORT_SYMBOL(__rcu_is_watching);
 
 #endif /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
 
 /*
- * Test whether the current CPU was interrupted from idle.  Nested
- * interrupts don't count, we must be running at the first interrupt
- * level.
- */
-static int rcu_is_cpu_rrupt_from_idle(void)
-{
-	return rcu_dynticks_nesting <= 1;
-}
-
-/*
  * Helper function for rcu_sched_qs() and rcu_bh_qs().
  * Also irqs are disabled to avoid confusion due to interrupt handlers
  * invoking call_rcu().
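__rcu_is_watching() backs rcu_is_watching(), which lockdep-RCU and the tracing code consult to catch RCU readers running from dyntick-idle context. With the nesting counter gone, Tiny RCU simply claims to be watching at all times, so such diagnostics can no longer fire here. A minimal sketch of the kind of caller involved (example_reader() is invented for illustration):

	static void example_reader(void)
	{
		WARN_ON_ONCE(!rcu_is_watching());	/* never fires on Tiny RCU after this patch */
		rcu_read_lock();
		/* ... dereference RCU-protected data ... */
		rcu_read_unlock();
	}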
@@ -250,7 +150,7 @@ void rcu_bh_qs(void)
 void rcu_check_callbacks(int user)
 {
 	RCU_TRACE(check_cpu_stalls());
-	if (user || rcu_is_cpu_rrupt_from_idle())
+	if (user)
 		rcu_sched_qs();
 	else if (!in_softirq())
 		rcu_bh_qs();
@@ -357,6 +257,11 @@ static void __call_rcu(struct rcu_head *head,
 	rcp->curtail = &head->next;
 	RCU_TRACE(rcp->qlen++);
 	local_irq_restore(flags);
+
+	if (unlikely(is_idle_task(current))) {
+		/* force scheduling for rcu_sched_qs() */
+		resched_cpu(0);
+	}
 }
 
 /*
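These two hunks carry the functional change. With rcu_is_cpu_rrupt_from_idle() gone, a scheduling-clock tick that interrupts the idle loop no longer reports a quiescent state; only user-mode ticks do. The compensation is the new block in __call_rcu(): when a callback is posted by the idle task, resched_cpu(0) kicks the CPU (CPU 0 is the only one, since Tiny RCU builds only on !SMP) out of idle, and the resulting pass through the scheduler reports the rcu_sched quiescent state. A hypothetical caller, with invented names, to make the flow concrete:

	static struct rcu_head example_rh;	/* hypothetical */

	static void example_cb(struct rcu_head *rhp)
	{
		/* Invoked from RCU_SOFTIRQ once a grace period has elapsed. */
	}

	static void example_post_from_idle(void)
	{
		/*
		 * If the idle task runs this, __call_rcu() notices
		 * is_idle_task(current) and calls resched_cpu(0); the forced
		 * trip through the scheduler supplies the quiescent state
		 * that eventually lets example_cb() run.
		 */
		call_rcu(&example_rh, example_cb);
	}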
@@ -383,6 +288,8 @@ EXPORT_SYMBOL_GPL(call_rcu_bh);
 void __init rcu_init(void)
 {
 	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
+	RCU_TRACE(reset_cpu_stall_ticks(&rcu_sched_ctrlblk));
+	RCU_TRACE(reset_cpu_stall_ticks(&rcu_bh_ctrlblk));
 
 	rcu_early_boot_tests();
 }
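The final hunk arms the stall-warning state for both flavors at boot. reset_cpu_stall_ticks() lives in kernel/rcu/tiny_plugin.h under CONFIG_RCU_TRACE (hence the RCU_TRACE() wrappers); from memory of this era's tree it amounts to roughly the following, offered as a sketch rather than a quotation:

	static void reset_cpu_stall_ticks(struct rcu_ctrlblk *rcp)
	{
		rcp->ticks_this_gp = 0;		/* no ticks observed this grace period */
		rcp->gp_start = jiffies;	/* grace period notionally starts now */
		rcp->jiffies_stall = jiffies + rcu_jiffies_till_stall_check();
	}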