diff options
-rw-r--r-- | kernel/softirq.c | 37 |
1 file changed, 16 insertions(+), 21 deletions(-)
diff --git a/kernel/softirq.c b/kernel/softirq.c index f84aa48c0e66..9a4500e4c189 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c | |||
@@ -213,40 +213,35 @@ EXPORT_SYMBOL(local_bh_enable_ip); | |||
213 | 213 | ||
214 | #ifdef CONFIG_TRACE_IRQFLAGS | 214 | #ifdef CONFIG_TRACE_IRQFLAGS |
215 | /* | 215 | /* |
216 | * Convoluted means of passing __do_softirq() a message through the various | ||
217 | * architecture execute_on_stack() bits. | ||
218 | * | ||
219 | * When we run softirqs from irq_exit() and thus on the hardirq stack we need | 216 | * When we run softirqs from irq_exit() and thus on the hardirq stack we need |
220 | * to keep the lockdep irq context tracking as tight as possible in order to | 217 | * to keep the lockdep irq context tracking as tight as possible in order to |
221 | * not miss-qualify lock contexts and miss possible deadlocks. | 218 | * not miss-qualify lock contexts and miss possible deadlocks. |
222 | */ | 219 | */ |
223 | static DEFINE_PER_CPU(int, softirq_from_hardirq); | ||
224 | 220 | ||
225 | static inline void lockdep_softirq_from_hardirq(void) | 221 | static inline bool lockdep_softirq_start(void) |
226 | { | 222 | { |
227 | this_cpu_write(softirq_from_hardirq, 1); | 223 | bool in_hardirq = false; |
228 | } | ||
229 | 224 | ||
230 | static inline void lockdep_softirq_start(void) | 225 | if (trace_hardirq_context(current)) { |
231 | { | 226 | in_hardirq = true; |
232 | if (this_cpu_read(softirq_from_hardirq)) | ||
233 | trace_hardirq_exit(); | 227 | trace_hardirq_exit(); |
228 | } | ||
229 | |||
234 | lockdep_softirq_enter(); | 230 | lockdep_softirq_enter(); |
231 | |||
232 | return in_hardirq; | ||
235 | } | 233 | } |
236 | 234 | ||
/*
 * Leave lockdep's softirq context.  If lockdep_softirq_start() reported
 * that __do_softirq() was entered from a hardirq context, re-enter that
 * hardirq context so lockdep's irq state matches reality again.
 * Must be called in this order: softirq exit before hardirq re-enter.
 */
static inline void lockdep_softirq_end(bool in_hardirq)
{
	lockdep_softirq_exit();

	if (in_hardirq)
		trace_hardirq_enter();
}
245 | |||
#else
/* !CONFIG_TRACE_IRQFLAGS: no lockdep irq-context tracking, no-op stubs. */
static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif
251 | 246 | ||
252 | asmlinkage void __do_softirq(void) | 247 | asmlinkage void __do_softirq(void) |
@@ -255,6 +250,7 @@ asmlinkage void __do_softirq(void) | |||
255 | unsigned long old_flags = current->flags; | 250 | unsigned long old_flags = current->flags; |
256 | int max_restart = MAX_SOFTIRQ_RESTART; | 251 | int max_restart = MAX_SOFTIRQ_RESTART; |
257 | struct softirq_action *h; | 252 | struct softirq_action *h; |
253 | bool in_hardirq; | ||
258 | __u32 pending; | 254 | __u32 pending; |
259 | int cpu; | 255 | int cpu; |
260 | 256 | ||
@@ -269,7 +265,7 @@ asmlinkage void __do_softirq(void) | |||
269 | account_irq_enter_time(current); | 265 | account_irq_enter_time(current); |
270 | 266 | ||
271 | __local_bh_disable(_RET_IP_, SOFTIRQ_OFFSET); | 267 | __local_bh_disable(_RET_IP_, SOFTIRQ_OFFSET); |
272 | lockdep_softirq_start(); | 268 | in_hardirq = lockdep_softirq_start(); |
273 | 269 | ||
274 | cpu = smp_processor_id(); | 270 | cpu = smp_processor_id(); |
275 | restart: | 271 | restart: |
@@ -316,7 +312,7 @@ restart: | |||
316 | wakeup_softirqd(); | 312 | wakeup_softirqd(); |
317 | } | 313 | } |
318 | 314 | ||
319 | lockdep_softirq_end(); | 315 | lockdep_softirq_end(in_hardirq); |
320 | account_irq_exit_time(current); | 316 | account_irq_exit_time(current); |
321 | __local_bh_enable(SOFTIRQ_OFFSET); | 317 | __local_bh_enable(SOFTIRQ_OFFSET); |
322 | WARN_ON_ONCE(in_interrupt()); | 318 | WARN_ON_ONCE(in_interrupt()); |
@@ -365,7 +361,6 @@ void irq_enter(void) | |||
365 | static inline void invoke_softirq(void) | 361 | static inline void invoke_softirq(void) |
366 | { | 362 | { |
367 | if (!force_irqthreads) { | 363 | if (!force_irqthreads) { |
368 | lockdep_softirq_from_hardirq(); | ||
369 | #ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK | 364 | #ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK |
370 | /* | 365 | /* |
371 | * We can safely execute softirq on the current stack if | 366 | * We can safely execute softirq on the current stack if |