 arch/Kconfig     | 10 ++++++++++
 kernel/softirq.c | 14 ++++++++++----
 2 files changed, 20 insertions(+), 4 deletions(-)
diff --git a/arch/Kconfig b/arch/Kconfig
index af2cc6eabcc7..ad95133f8fae 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -390,6 +390,16 @@ config HAVE_UNDERSCORE_SYMBOL_PREFIX
 	  Some architectures generate an _ in front of C symbols; things like
 	  module loading and assembly files need to know about this.
 
+config HAVE_IRQ_EXIT_ON_IRQ_STACK
+	bool
+	help
+	  The architecture runs not only the irq handler but also irq_exit()
+	  on the irq stack. This way we can process softirqs on this irq
+	  stack instead of switching to a new one when we call __do_softirq()
+	  at the end of a hardirq.
+	  This spares a stack switch and improves cache usage on softirq
+	  processing.
+
 #
 # ABI hall of shame
 #
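An architecture advertises this property by selecting the new symbol and by keeping irq_exit() inside the part of its low-level entry code that still runs on the irq stack. The C sketch below is purely illustrative and not taken from any in-tree architecture; hypothetical_arch_do_IRQ(), irq_from_regs() and call_on_irq_stack() are made-up stand-ins for an arch's real entry hook, irq-number decode and stack-switch primitive.

#include <linux/irq.h>
#include <linux/hardirq.h>

/* Assumed arch-specific primitives, declared here only so the sketch is complete. */
extern unsigned int irq_from_regs(struct pt_regs *regs);
extern void call_on_irq_stack(void (*fn)(struct pt_regs *), struct pt_regs *regs);

/*
 * Runs entirely on the per-cpu irq stack: the handler *and* irq_exit(),
 * so pending softirqs are processed on that near-empty stack rather
 * than on the interrupted task stack.
 */
static void handle_irq_on_irq_stack(struct pt_regs *regs)
{
	irq_enter();
	generic_handle_irq(irq_from_regs(regs));
	irq_exit();			/* still on the irq stack here */
}

void hypothetical_arch_do_IRQ(struct pt_regs *regs)
{
	call_on_irq_stack(handle_irq_on_irq_stack, regs);
}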
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 2b4328ea769f..dacd0ab51df4 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -332,15 +332,21 @@ void irq_enter(void)
 static inline void invoke_softirq(void)
 {
 	if (!force_irqthreads) {
+#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
 		/*
 		 * We can safely execute softirq on the current stack if
 		 * it is the irq stack, because it should be near empty
-		 * at this stage. But we have no way to know if the arch
-		 * calls irq_exit() on the irq stack. So call softirq
-		 * in its own stack to prevent from any overrun on top
-		 * of a potentially deep task stack.
+		 * at this stage.
+		 */
+		__do_softirq();
+#else
+		/*
+		 * Otherwise, irq_exit() is called on the task stack, which can
+		 * already be deep. So run softirqs on their own stack
+		 * to prevent any overrun.
 		 */
 		do_softirq_own_stack();
+#endif
 	} else {
 		wakeup_softirqd();
 	}
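For contrast, do_softirq_own_stack() is the helper that performs the stack switch the new #ifdef branch skips. The minimal sketch below only illustrates what such a helper amounts to on architectures with a dedicated softirq stack; call_on_stack(), softirq_stack_sketch and sketch_do_softirq_own_stack() are placeholders, not real kernel APIs, and each architecture implements the switch with its own primitives.

#include <linux/interrupt.h>
#include <linux/thread_info.h>

/* Assumed stack-switch primitive, declared here only so the sketch is complete. */
extern void call_on_stack(void (*fn)(void), void *stack_top);

/* Placeholder stack storage; real architectures use properly sized per-cpu stacks. */
static char softirq_stack_sketch[THREAD_SIZE] __aligned(THREAD_SIZE);

/*
 * Run __do_softirq() after moving the stack pointer onto a dedicated
 * softirq stack. This extra switch is exactly what irq_exit() avoids
 * when CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK guarantees softirqs already
 * run on the irq stack.
 */
void sketch_do_softirq_own_stack(void)
{
	call_on_stack(__do_softirq, softirq_stack_sketch + THREAD_SIZE);
}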