Diffstat (limited to 'kernel/softirq.c')
-rw-r--r--  kernel/softirq.c |  82
1 file changed, 54 insertions(+), 28 deletions(-)
diff --git a/kernel/softirq.c b/kernel/softirq.c
index d19b1c9aa7c5..18f4be0d5fe0 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -67,7 +67,7 @@ char *softirq_to_name[NR_SOFTIRQS] = {
  * to the pending events, so lets the scheduler to balance
  * the softirq load for us.
  */
-void wakeup_softirqd(void)
+static void wakeup_softirqd(void)
 {
         /* Interrupts are disabled: no need to stop preemption */
         struct task_struct *tsk = __get_cpu_var(ksoftirqd);
@@ -77,11 +77,21 @@ void wakeup_softirqd(void)
 }
 
 /*
+ * preempt_count and SOFTIRQ_OFFSET usage:
+ * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
+ *   softirq processing.
+ * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
+ *   on local_bh_disable or local_bh_enable.
+ * This lets us distinguish between whether we are currently processing
+ * softirq and whether we just have bh disabled.
+ */
+
+/*
  * This one is for softirq.c-internal use,
  * where hardirqs are disabled legitimately:
  */
 #ifdef CONFIG_TRACE_IRQFLAGS
-static void __local_bh_disable(unsigned long ip)
+static void __local_bh_disable(unsigned long ip, unsigned int cnt)
 {
         unsigned long flags;
 
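The comment block added above is the heart of this patch: softirq execution itself moves preempt_count by SOFTIRQ_OFFSET, while local_bh_disable()/local_bh_enable() move it by twice that, so the softirq bits can tell 'currently running a softirq' apart from 'merely has bottom halves disabled'. A minimal user-space sketch of that bit arithmetic, using illustrative constant values (the real definitions are derived in <linux/preempt.h>; the in_serving_softirq()-style test below mirrors the predicate this scheme enables):

    #include <stdio.h>

    /* Illustrative values only; the kernel derives the real ones. */
    #define SOFTIRQ_OFFSET          0x100
    #define SOFTIRQ_DISABLE_OFFSET  (2 * SOFTIRQ_OFFSET)
    #define SOFTIRQ_MASK            0xff00

    static unsigned int preempt_count;

    static unsigned int softirq_count(void)
    {
        return preempt_count & SOFTIRQ_MASK;
    }

    /* Odd multiple of SOFTIRQ_OFFSET => a softirq handler is running now. */
    static int in_serving_softirq(void)
    {
        return softirq_count() & SOFTIRQ_OFFSET;
    }

    int main(void)
    {
        preempt_count += SOFTIRQ_DISABLE_OFFSET;   /* local_bh_disable()   */
        printf("bh disabled, serving? %d\n", !!in_serving_softirq()); /* 0 */

        preempt_count += SOFTIRQ_OFFSET;           /* __do_softirq() entry */
        printf("in handler, serving? %d\n", !!in_serving_softirq());  /* 1 */
        return 0;
    }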
@@ -95,32 +105,43 @@ static void __local_bh_disable(unsigned long ip)
          * We must manually increment preempt_count here and manually
          * call the trace_preempt_off later.
          */
-        preempt_count() += SOFTIRQ_OFFSET;
+        preempt_count() += cnt;
         /*
          * Were softirqs turned off above:
          */
-        if (softirq_count() == SOFTIRQ_OFFSET)
+        if (softirq_count() == cnt)
                 trace_softirqs_off(ip);
         raw_local_irq_restore(flags);
 
-        if (preempt_count() == SOFTIRQ_OFFSET)
+        if (preempt_count() == cnt)
                 trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
 }
 #else /* !CONFIG_TRACE_IRQFLAGS */
-static inline void __local_bh_disable(unsigned long ip)
+static inline void __local_bh_disable(unsigned long ip, unsigned int cnt)
 {
-        add_preempt_count(SOFTIRQ_OFFSET);
+        add_preempt_count(cnt);
         barrier();
 }
 #endif /* CONFIG_TRACE_IRQFLAGS */
 
 void local_bh_disable(void)
 {
-        __local_bh_disable((unsigned long)__builtin_return_address(0));
+        __local_bh_disable((unsigned long)__builtin_return_address(0),
+                                SOFTIRQ_DISABLE_OFFSET);
 }
 
 EXPORT_SYMBOL(local_bh_disable);
 
+static void __local_bh_enable(unsigned int cnt)
+{
+        WARN_ON_ONCE(in_irq());
+        WARN_ON_ONCE(!irqs_disabled());
+
+        if (softirq_count() == cnt)
+                trace_softirqs_on((unsigned long)__builtin_return_address(0));
+        sub_preempt_count(cnt);
+}
+
 /*
  * Special-case - softirqs can safely be enabled in
  * cond_resched_softirq(), or by __do_softirq(),
@@ -128,12 +149,7 @@ EXPORT_SYMBOL(local_bh_disable);
  */
 void _local_bh_enable(void)
 {
-        WARN_ON_ONCE(in_irq());
-        WARN_ON_ONCE(!irqs_disabled());
-
-        if (softirq_count() == SOFTIRQ_OFFSET)
-                trace_softirqs_on((unsigned long)__builtin_return_address(0));
-        sub_preempt_count(SOFTIRQ_OFFSET);
+        __local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
 }
 
 EXPORT_SYMBOL(_local_bh_enable);
@@ -147,13 +163,13 @@ static inline void _local_bh_enable_ip(unsigned long ip)
         /*
          * Are softirqs going to be turned on now:
          */
-        if (softirq_count() == SOFTIRQ_OFFSET)
+        if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
                 trace_softirqs_on(ip);
         /*
          * Keep preemption disabled until we are done with
         * softirq processing:
          */
-        sub_preempt_count(SOFTIRQ_OFFSET - 1);
+        sub_preempt_count(SOFTIRQ_DISABLE_OFFSET - 1);
 
         if (unlikely(!in_interrupt() && local_softirq_pending()))
                 do_softirq();
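The SOFTIRQ_DISABLE_OFFSET - 1 above does two jobs at once: it clears the bh-disable contribution from the softirq bits but deliberately leaves a plain preempt count of one, so the local_softirq_pending() check and the do_softirq() call still run with preemption off; the remaining single count is dropped afterwards (dec_preempt_count() in the surrounding function). A toy model of the counter arithmetic, with the same illustrative constants as the sketch above:

    #include <assert.h>

    #define SOFTIRQ_OFFSET          0x100   /* illustrative */
    #define SOFTIRQ_DISABLE_OFFSET  (2 * SOFTIRQ_OFFSET)

    int main(void)
    {
        unsigned int preempt_count = 0;

        preempt_count += SOFTIRQ_DISABLE_OFFSET;      /* local_bh_disable()      */
        preempt_count -= SOFTIRQ_DISABLE_OFFSET - 1;  /* softirqs enabled...     */
        assert(preempt_count == 1);                   /* ...preemption still off */

        /* pending softirqs would be run here */

        preempt_count -= 1;                           /* dec_preempt_count()     */
        assert(preempt_count == 0);
        return 0;
    }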
@@ -198,7 +214,8 @@ asmlinkage void __do_softirq(void)
         pending = local_softirq_pending();
         account_system_vtime(current);
 
-        __local_bh_disable((unsigned long)__builtin_return_address(0));
+        __local_bh_disable((unsigned long)__builtin_return_address(0),
+                                SOFTIRQ_OFFSET);
         lockdep_softirq_enter();
 
         cpu = smp_processor_id();
@@ -212,18 +229,20 @@ restart:
 
         do {
                 if (pending & 1) {
+                        unsigned int vec_nr = h - softirq_vec;
                         int prev_count = preempt_count();
-                        kstat_incr_softirqs_this_cpu(h - softirq_vec);
 
-                        trace_softirq_entry(h, softirq_vec);
+                        kstat_incr_softirqs_this_cpu(vec_nr);
+
+                        trace_softirq_entry(vec_nr);
                         h->action(h);
-                        trace_softirq_exit(h, softirq_vec);
+                        trace_softirq_exit(vec_nr);
                         if (unlikely(prev_count != preempt_count())) {
-                                printk(KERN_ERR "huh, entered softirq %td %s %p"
+                                printk(KERN_ERR "huh, entered softirq %u %s %p"
                                        "with preempt_count %08x,"
-                                       " exited with %08x?\n", h - softirq_vec,
-                                       softirq_to_name[h - softirq_vec],
-                                       h->action, prev_count, preempt_count());
+                                       " exited with %08x?\n", vec_nr,
+                                       softirq_to_name[vec_nr], h->action,
+                                       prev_count, preempt_count());
                                 preempt_count() = prev_count;
                         }
 
@@ -245,7 +264,7 @@ restart:
         lockdep_softirq_exit();
 
         account_system_vtime(current);
-        _local_bh_enable();
+        __local_bh_enable(SOFTIRQ_OFFSET);
 }
 
 #ifndef __ARCH_HAS_DO_SOFTIRQ
@@ -279,10 +298,16 @@ void irq_enter(void)
 
         rcu_irq_enter();
         if (idle_cpu(cpu) && !in_interrupt()) {
-                __irq_enter();
+                /*
+                 * Prevent raise_softirq from needlessly waking up ksoftirqd
+                 * here, as softirq will be serviced on return from interrupt.
+                 */
+                local_bh_disable();
                 tick_check_idle(cpu);
-        } else
-                __irq_enter();
+                _local_bh_enable();
+        }
+
+        __irq_enter();
 }
 
 #ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
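The new comment in irq_enter() leans on how raising a softirq decides whether to wake ksoftirqd: once local_bh_disable() has bumped the softirq bits of preempt_count, in_interrupt() is non-zero, so any softirq raised by tick_check_idle() is left to be serviced on the irq-exit path instead of bouncing through the ksoftirqd thread. For context, the decision point lives in this same file and reads roughly as follows in kernels of this era (comment abridged):

    inline void raise_softirq_irqoff(unsigned int nr)
    {
        __raise_softirq_irqoff(nr);

        /*
         * If we're in an interrupt or softirq (which includes
         * softirq-disabled sections), the softirq runs on the
         * irq-exit path; only wake ksoftirqd from task context.
         */
        if (!in_interrupt())
                wakeup_softirqd();
    }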
@@ -696,6 +721,7 @@ static int run_ksoftirqd(void * __bind_cpu)
 {
         set_current_state(TASK_INTERRUPTIBLE);
 
+        current->flags |= PF_KSOFTIRQD;
         while (!kthread_should_stop()) {
                 preempt_disable();
                 if (!local_softirq_pending()) {
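PF_KSOFTIRQD tags the per-CPU ksoftirqd thread itself; the softirq time-accounting side of this series needs that distinction, because softirq cycles stolen from an interrupted task should be billed as softirq time, while the same work done by ksoftirqd already shows up as that thread's own runtime. A hedged sketch of the kind of predicate a consumer could build on the flag (the helper name is hypothetical and not part of this patch):

    #include <linux/sched.h>
    #include <linux/hardirq.h>

    /*
     * Hypothetical helper: should the CPU time @curr is burning right
     * now be charged to the softirq bucket?  Only when a softirq is
     * actually being served and we are not simply ksoftirqd doing its
     * normal job.
     */
    static inline bool curr_time_is_softirq(struct task_struct *curr)
    {
        return in_serving_softirq() && !(curr->flags & PF_KSOFTIRQD);
    }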
