Diffstat (limited to 'kernel/softirq.c')
-rw-r--r--	kernel/softirq.c	53
1 file changed, 33 insertions, 20 deletions
diff --git a/kernel/softirq.c b/kernel/softirq.c
index d7d498d8cc4f..b24988353458 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -29,7 +29,6 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/irq.h>
 
-#include <asm/irq.h>
 /*
    - No shared variables, all the data are CPU local.
    - If a softirq needs serialization, let it serialize itself
@@ -100,13 +99,13 @@ static void __local_bh_disable(unsigned long ip, unsigned int cnt)
 
 	raw_local_irq_save(flags);
 	/*
-	 * The preempt tracer hooks into add_preempt_count and will break
+	 * The preempt tracer hooks into preempt_count_add and will break
 	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
 	 * is set and before current->softirq_enabled is cleared.
 	 * We must manually increment preempt_count here and manually
 	 * call the trace_preempt_off later.
 	 */
-	preempt_count() += cnt;
+	__preempt_count_add(cnt);
 	/*
 	 * Were softirqs turned off above:
 	 */
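Context for the hunk above (not from this diff): the raw __preempt_count_add() is used because the traced preempt_count_add() ends up calling the preempt-off tracer, which is exactly what the comment says must be deferred until the softirq count is visible. A simplified sketch of the traced wrapper's shape, assuming the ftrace helpers trace_preempt_off() and CALLER_ADDR0/CALLER_ADDR1; illustration only, not the verbatim kernel implementation:

/* Simplified sketch of the traced wrapper -- illustration only. */
void preempt_count_add(int val)
{
	__preempt_count_add(val);		/* raw counter update, no tracing */
	if (preempt_count() == val)		/* first preempt-off level? */
		trace_preempt_off(CALLER_ADDR0, CALLER_ADDR1);
}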
@@ -120,7 +119,7 @@ static void __local_bh_disable(unsigned long ip, unsigned int cnt)
 #else /* !CONFIG_TRACE_IRQFLAGS */
 static inline void __local_bh_disable(unsigned long ip, unsigned int cnt)
 {
-	add_preempt_count(cnt);
+	preempt_count_add(cnt);
 	barrier();
 }
 #endif /* CONFIG_TRACE_IRQFLAGS */
@@ -134,12 +133,11 @@ EXPORT_SYMBOL(local_bh_disable);
 
 static void __local_bh_enable(unsigned int cnt)
 {
-	WARN_ON_ONCE(in_irq());
 	WARN_ON_ONCE(!irqs_disabled());
 
 	if (softirq_count() == cnt)
 		trace_softirqs_on(_RET_IP_);
-	sub_preempt_count(cnt);
+	preempt_count_sub(cnt);
 }
 
 /*
@@ -149,6 +147,7 @@ static void __local_bh_enable(unsigned int cnt)
  */
 void _local_bh_enable(void)
 {
+	WARN_ON_ONCE(in_irq());
 	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
 }
 
@@ -169,12 +168,17 @@ static inline void _local_bh_enable_ip(unsigned long ip)
 	 * Keep preemption disabled until we are done with
 	 * softirq processing:
 	 */
-	sub_preempt_count(SOFTIRQ_DISABLE_OFFSET - 1);
+	preempt_count_sub(SOFTIRQ_DISABLE_OFFSET - 1);
 
-	if (unlikely(!in_interrupt() && local_softirq_pending()))
+	if (unlikely(!in_interrupt() && local_softirq_pending())) {
+		/*
+		 * Run softirq if any pending. And do it in its own stack
+		 * as we may be calling this deep in a task call stack already.
+		 */
 		do_softirq();
+	}
 
-	dec_preempt_count();
+	preempt_count_dec();
 #ifdef CONFIG_TRACE_IRQFLAGS
 	local_irq_enable();
 #endif
@@ -256,7 +260,7 @@ restart:
 			       " exited with %08x?\n", vec_nr,
 			       softirq_to_name[vec_nr], h->action,
 			       prev_count, preempt_count());
-			preempt_count() = prev_count;
+			preempt_count_set(prev_count);
 		}
 
 		rcu_bh_qs(cpu);
@@ -280,10 +284,11 @@ restart:
 
 	account_irq_exit_time(current);
 	__local_bh_enable(SOFTIRQ_OFFSET);
+	WARN_ON_ONCE(in_interrupt());
 	tsk_restore_flags(current, old_flags, PF_MEMALLOC);
 }
 
-#ifndef __ARCH_HAS_DO_SOFTIRQ
+
 
 asmlinkage void do_softirq(void)
 {
@@ -298,13 +303,11 @@ asmlinkage void do_softirq(void)
 	pending = local_softirq_pending();
 
 	if (pending)
-		__do_softirq();
+		do_softirq_own_stack();
 
 	local_irq_restore(flags);
 }
 
-#endif
-
 /*
  * Enter an interrupt context.
  */
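Context for the do_softirq() hunk above: do_softirq_own_stack() is defined outside this file, by the companion <linux/interrupt.h> change in this series. Architectures that define __ARCH_HAS_DO_SOFTIRQ provide their own version that switches to a dedicated stack; everyone else is expected to fall back to running the softirqs inline, roughly along these lines (a sketch, not quoted verbatim from the header):

#ifdef __ARCH_HAS_DO_SOFTIRQ
void do_softirq_own_stack(void);	/* arch routine: switch stacks, run __do_softirq() */
#else
static inline void do_softirq_own_stack(void)
{
	__do_softirq();			/* no dedicated stack: run inline */
}
#endif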
@@ -329,15 +332,21 @@ void irq_enter(void)
 static inline void invoke_softirq(void)
 {
 	if (!force_irqthreads) {
+#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
 		/*
 		 * We can safely execute softirq on the current stack if
 		 * it is the irq stack, because it should be near empty
-		 * at this stage. But we have no way to know if the arch
-		 * calls irq_exit() on the irq stack. So call softirq
-		 * in its own stack to prevent from any overrun on top
-		 * of a potentially deep task stack.
+		 * at this stage.
 		 */
-		do_softirq();
+		__do_softirq();
+#else
+		/*
+		 * Otherwise, irq_exit() is called on the task stack that can
+		 * be potentially deep already. So call softirq in its own stack
+		 * to prevent from any overrun.
+		 */
+		do_softirq_own_stack();
+#endif
 	} else {
 		wakeup_softirqd();
 	}
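Context for the invoke_softirq() hunk above: when CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK is not set, do_softirq_own_stack() is the arch hook that performs the stack switch the comment describes. A hypothetical, heavily simplified arch-side override; this_cpu_softirq_stack_top() and call_on_stack() are illustrative names, not a real kernel API:

/* Hypothetical illustration only -- not real arch code. */
void do_softirq_own_stack(void)
{
	void *top = this_cpu_softirq_stack_top();	/* illustrative helper */

	/*
	 * Run the pending softirqs on a dedicated per-cpu stack instead
	 * of the (possibly deep) current task stack.
	 */
	call_on_stack(__do_softirq, top);		/* illustrative helper */
}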
@@ -369,7 +378,7 @@ void irq_exit(void)
 
 	account_irq_exit_time(current);
 	trace_hardirq_exit();
-	sub_preempt_count(HARDIRQ_OFFSET);
+	preempt_count_sub(HARDIRQ_OFFSET);
 	if (!in_interrupt() && local_softirq_pending())
 		invoke_softirq();
 
@@ -771,6 +780,10 @@ static void run_ksoftirqd(unsigned int cpu)
 {
 	local_irq_disable();
 	if (local_softirq_pending()) {
+		/*
+		 * We can safely run softirq on inline stack, as we are not deep
+		 * in the task stack here.
+		 */
 		__do_softirq();
 		rcu_note_context_switch(cpu);
 		local_irq_enable();
