diff options
author | Ingo Molnar <mingo@kernel.org> | 2013-11-27 05:09:19 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2013-11-27 05:09:19 -0500 |
commit | 7d5b15831039837f3551bf40f188385ed8ad5e16 (patch) | |
tree | a95870602cabbbd23fe0ff5c9f93a3fc22a81566 /kernel/softirq.c | |
parent | e0edc78f25c020dea66742c05a7fbcb2ff3df629 (diff) | |
parent | f1a83e652bedef88d6d77d3dc58250e08e7062bd (diff) |
Merge branch 'core/urgent' into core/locking
Prepare for dependent patch.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/softirq.c')
-rw-r--r-- | kernel/softirq.c | 54 |
1 file changed, 45 insertions(+), 9 deletions(-)
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 11025ccc06dd..f84aa48c0e66 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -211,14 +211,52 @@ EXPORT_SYMBOL(local_bh_enable_ip); | |||
211 | #define MAX_SOFTIRQ_TIME msecs_to_jiffies(2) | 211 | #define MAX_SOFTIRQ_TIME msecs_to_jiffies(2) |
212 | #define MAX_SOFTIRQ_RESTART 10 | 212 | #define MAX_SOFTIRQ_RESTART 10 |
213 | 213 | ||
214 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
215 | /* | ||
216 | * Convoluted means of passing __do_softirq() a message through the various | ||
217 | * architecture execute_on_stack() bits. | ||
218 | * | ||
219 | * When we run softirqs from irq_exit() and thus on the hardirq stack we need | ||
220 | * to keep the lockdep irq context tracking as tight as possible in order to | ||
221 | * not miss-qualify lock contexts and miss possible deadlocks. | ||
222 | */ | ||
223 | static DEFINE_PER_CPU(int, softirq_from_hardirq); | ||
224 | |||
225 | static inline void lockdep_softirq_from_hardirq(void) | ||
226 | { | ||
227 | this_cpu_write(softirq_from_hardirq, 1); | ||
228 | } | ||
229 | |||
230 | static inline void lockdep_softirq_start(void) | ||
231 | { | ||
232 | if (this_cpu_read(softirq_from_hardirq)) | ||
233 | trace_hardirq_exit(); | ||
234 | lockdep_softirq_enter(); | ||
235 | } | ||
236 | |||
237 | static inline void lockdep_softirq_end(void) | ||
238 | { | ||
239 | lockdep_softirq_exit(); | ||
240 | if (this_cpu_read(softirq_from_hardirq)) { | ||
241 | this_cpu_write(softirq_from_hardirq, 0); | ||
242 | trace_hardirq_enter(); | ||
243 | } | ||
244 | } | ||
245 | |||
246 | #else | ||
247 | static inline void lockdep_softirq_from_hardirq(void) { } | ||
248 | static inline void lockdep_softirq_start(void) { } | ||
249 | static inline void lockdep_softirq_end(void) { } | ||
250 | #endif | ||
251 | |||
214 | asmlinkage void __do_softirq(void) | 252 | asmlinkage void __do_softirq(void) |
215 | { | 253 | { |
216 | struct softirq_action *h; | ||
217 | __u32 pending; | ||
218 | unsigned long end = jiffies + MAX_SOFTIRQ_TIME; | 254 | unsigned long end = jiffies + MAX_SOFTIRQ_TIME; |
219 | int cpu; | ||
220 | unsigned long old_flags = current->flags; | 255 | unsigned long old_flags = current->flags; |
221 | int max_restart = MAX_SOFTIRQ_RESTART; | 256 | int max_restart = MAX_SOFTIRQ_RESTART; |
257 | struct softirq_action *h; | ||
258 | __u32 pending; | ||
259 | int cpu; | ||
222 | 260 | ||
223 | /* | 261 | /* |
224 | * Mask out PF_MEMALLOC s current task context is borrowed for the | 262 | * Mask out PF_MEMALLOC s current task context is borrowed for the |
@@ -231,7 +269,7 @@ asmlinkage void __do_softirq(void) | |||
231 | account_irq_enter_time(current); | 269 | account_irq_enter_time(current); |
232 | 270 | ||
233 | __local_bh_disable(_RET_IP_, SOFTIRQ_OFFSET); | 271 | __local_bh_disable(_RET_IP_, SOFTIRQ_OFFSET); |
234 | lockdep_softirq_enter(); | 272 | lockdep_softirq_start(); |
235 | 273 | ||
236 | cpu = smp_processor_id(); | 274 | cpu = smp_processor_id(); |
237 | restart: | 275 | restart: |
@@ -278,16 +316,13 @@ restart: | |||
278 | wakeup_softirqd(); | 316 | wakeup_softirqd(); |
279 | } | 317 | } |
280 | 318 | ||
281 | lockdep_softirq_exit(); | 319 | lockdep_softirq_end(); |
282 | |||
283 | account_irq_exit_time(current); | 320 | account_irq_exit_time(current); |
284 | __local_bh_enable(SOFTIRQ_OFFSET); | 321 | __local_bh_enable(SOFTIRQ_OFFSET); |
285 | WARN_ON_ONCE(in_interrupt()); | 322 | WARN_ON_ONCE(in_interrupt()); |
286 | tsk_restore_flags(current, old_flags, PF_MEMALLOC); | 323 | tsk_restore_flags(current, old_flags, PF_MEMALLOC); |
287 | } | 324 | } |
288 | 325 | ||
289 | |||
290 | |||
291 | asmlinkage void do_softirq(void) | 326 | asmlinkage void do_softirq(void) |
292 | { | 327 | { |
293 | __u32 pending; | 328 | __u32 pending; |
@@ -330,6 +365,7 @@ void irq_enter(void) | |||
330 | static inline void invoke_softirq(void) | 365 | static inline void invoke_softirq(void) |
331 | { | 366 | { |
332 | if (!force_irqthreads) { | 367 | if (!force_irqthreads) { |
368 | lockdep_softirq_from_hardirq(); | ||
333 | #ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK | 369 | #ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK |
334 | /* | 370 | /* |
335 | * We can safely execute softirq on the current stack if | 371 | * We can safely execute softirq on the current stack if |
@@ -375,13 +411,13 @@ void irq_exit(void) | |||
375 | #endif | 411 | #endif |
376 | 412 | ||
377 | account_irq_exit_time(current); | 413 | account_irq_exit_time(current); |
378 | trace_hardirq_exit(); | ||
379 | preempt_count_sub(HARDIRQ_OFFSET); | 414 | preempt_count_sub(HARDIRQ_OFFSET); |
380 | if (!in_interrupt() && local_softirq_pending()) | 415 | if (!in_interrupt() && local_softirq_pending()) |
381 | invoke_softirq(); | 416 | invoke_softirq(); |
382 | 417 | ||
383 | tick_irq_exit(); | 418 | tick_irq_exit(); |
384 | rcu_irq_exit(); | 419 | rcu_irq_exit(); |
420 | trace_hardirq_exit(); /* must be last! */ | ||
385 | } | 421 | } |
386 | 422 | ||
387 | /* | 423 | /* |