Diffstat (limited to 'kernel/softirq.c')
 -rw-r--r--  kernel/softirq.c | 171
 1 file changed, 147 insertions(+), 24 deletions(-)
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 336f92d64e2e..bf25015dce16 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -62,6 +62,137 @@ static inline void wakeup_softirqd(void)
 }
 
 /*
+ * This one is for softirq.c-internal use,
+ * where hardirqs are disabled legitimately:
+ */
+#ifdef CONFIG_TRACE_IRQFLAGS
+static void __local_bh_disable(unsigned long ip)
+{
+        unsigned long flags;
+
+        WARN_ON_ONCE(in_irq());
+
+        raw_local_irq_save(flags);
+        add_preempt_count(SOFTIRQ_OFFSET);
+        /*
+         * Were softirqs turned off above:
+         */
+        if (softirq_count() == SOFTIRQ_OFFSET)
+                trace_softirqs_off(ip);
+        raw_local_irq_restore(flags);
+}
+#else /* !CONFIG_TRACE_IRQFLAGS */
+static inline void __local_bh_disable(unsigned long ip)
+{
+        add_preempt_count(SOFTIRQ_OFFSET);
+        barrier();
+}
+#endif /* CONFIG_TRACE_IRQFLAGS */
+
+void local_bh_disable(void)
+{
+        __local_bh_disable((unsigned long)__builtin_return_address(0));
+}
+
+EXPORT_SYMBOL(local_bh_disable);
+
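The softirq_count() == SOFTIRQ_OFFSET test is what keeps the tracer honest: softirq_count() masks out the softirq byte of preempt_count, so it equals exactly one SOFTIRQ_OFFSET only at the outermost disable. A minimal user-space sketch of that arithmetic (mask values mirror the 2.6-era include/linux/hardirq.h; the globals are stand-ins, not kernel symbols):

#include <assert.h>
#include <stdio.h>

/*
 * User-space model (not kernel code) of the 2.6-era preempt_count
 * layout: 8 preempt bits at the bottom, then 8 softirq bits at bit 8.
 */
#define SOFTIRQ_SHIFT   8
#define SOFTIRQ_OFFSET  (1UL << SOFTIRQ_SHIFT)
#define SOFTIRQ_MASK    (255UL << SOFTIRQ_SHIFT)

static unsigned long preempt_count;

#define softirq_count() (preempt_count & SOFTIRQ_MASK)

int main(void)
{
        preempt_count += SOFTIRQ_OFFSET;   /* outermost local_bh_disable() */
        assert(softirq_count() == SOFTIRQ_OFFSET);  /* tracer fires here */

        preempt_count += SOFTIRQ_OFFSET;   /* nested disable */
        assert(softirq_count() != SOFTIRQ_OFFSET);  /* equality fails: no re-trace */

        printf("trace_softirqs_off() runs only for the outermost disable\n");
        return 0;
}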
+void __local_bh_enable(void)
+{
+        WARN_ON_ONCE(in_irq());
+
+        /*
+         * softirqs should never be enabled by __local_bh_enable(),
+         * it always nests inside local_bh_enable() sections:
+         */
+        WARN_ON_ONCE(softirq_count() == SOFTIRQ_OFFSET);
+
+        sub_preempt_count(SOFTIRQ_OFFSET);
+}
+EXPORT_SYMBOL_GPL(__local_bh_enable);
+
+/*
+ * Special-case - softirqs can safely be enabled in
+ * cond_resched_softirq(), or by __do_softirq(),
+ * without processing still-pending softirqs:
+ */
+void _local_bh_enable(void)
+{
+        WARN_ON_ONCE(in_irq());
+        WARN_ON_ONCE(!irqs_disabled());
+
+        if (softirq_count() == SOFTIRQ_OFFSET)
+                trace_softirqs_on((unsigned long)__builtin_return_address(0));
+        sub_preempt_count(SOFTIRQ_OFFSET);
+}
+
+EXPORT_SYMBOL(_local_bh_enable);
+
+void local_bh_enable(void)
+{
+#ifdef CONFIG_TRACE_IRQFLAGS
+        unsigned long flags;
+
+        WARN_ON_ONCE(in_irq());
+#endif
+        WARN_ON_ONCE(irqs_disabled());
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+        local_irq_save(flags);
+#endif
+        /*
+         * Are softirqs going to be turned on now:
+         */
+        if (softirq_count() == SOFTIRQ_OFFSET)
+                trace_softirqs_on((unsigned long)__builtin_return_address(0));
+        /*
+         * Keep preemption disabled until we are done with
+         * softirq processing:
+         */
+        sub_preempt_count(SOFTIRQ_OFFSET - 1);
+
+        if (unlikely(!in_interrupt() && local_softirq_pending()))
+                do_softirq();
+
+        dec_preempt_count();
+#ifdef CONFIG_TRACE_IRQFLAGS
+        local_irq_restore(flags);
+#endif
+        preempt_check_resched();
+}
+EXPORT_SYMBOL(local_bh_enable);
+
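The subtraction of SOFTIRQ_OFFSET - 1 is the heart of local_bh_enable(): it clears the softirq-disable level while leaving preempt_count one higher than before, so pending softirqs are processed with preemption still off; the trailing dec_preempt_count() drops that last reference. A sketch of the count transitions, using the same simplified user-space model as above:

#include <assert.h>

/* Same simplified preempt_count model as above (not kernel code). */
#define PREEMPT_MASK    255UL
#define SOFTIRQ_OFFSET  (1UL << 8)

static unsigned long preempt_count;

int main(void)
{
        preempt_count += SOFTIRQ_OFFSET;        /* local_bh_disable() */

        /* local_bh_enable(): clear the softirq level, stay non-preemptible */
        preempt_count -= SOFTIRQ_OFFSET - 1;
        assert((preempt_count & ~PREEMPT_MASK) == 0);  /* softirqs enabled */
        assert((preempt_count & PREEMPT_MASK) == 1);   /* preemption still off */

        /* ... this is the window where do_softirq() runs ... */

        preempt_count--;                        /* dec_preempt_count() */
        assert(preempt_count == 0);             /* fully enabled again */
        return 0;
}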
+void local_bh_enable_ip(unsigned long ip)
+{
+#ifdef CONFIG_TRACE_IRQFLAGS
+        unsigned long flags;
+
+        WARN_ON_ONCE(in_irq());
+
+        local_irq_save(flags);
+#endif
+        /*
+         * Are softirqs going to be turned on now:
+         */
+        if (softirq_count() == SOFTIRQ_OFFSET)
+                trace_softirqs_on(ip);
+        /*
+         * Keep preemption disabled until we are done with
+         * softirq processing:
+         */
+        sub_preempt_count(SOFTIRQ_OFFSET - 1);
+
+        if (unlikely(!in_interrupt() && local_softirq_pending()))
+                do_softirq();
+
+        dec_preempt_count();
+#ifdef CONFIG_TRACE_IRQFLAGS
+        local_irq_restore(flags);
+#endif
+        preempt_check_resched();
+}
+EXPORT_SYMBOL(local_bh_enable_ip);
+
+/*
  * We restart softirq processing MAX_SOFTIRQ_RESTART times,
  * and we fall back to softirqd after that.
  *
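For context on how callers use this pair, a hypothetical snippet (not part of this patch): code sharing per-CPU state with a softirq handler brackets the access with local_bh_disable()/local_bh_enable(), and with this change both ends are reported to the irq-flags tracer:

#include <linux/interrupt.h>
#include <linux/percpu.h>

/* Hypothetical caller: a per-CPU counter also updated from a tasklet. */
static DEFINE_PER_CPU(unsigned long, hits);

static void note_hit(void)
{
        local_bh_disable();             /* keep softirqs off this CPU */
        __get_cpu_var(hits)++;          /* safe: no tasklet can interleave */
        local_bh_enable();              /* may run pending softirqs here */
}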
| @@ -80,8 +211,11 @@ asmlinkage void __do_softirq(void) | |||
| 80 | int cpu; | 211 | int cpu; |
| 81 | 212 | ||
| 82 | pending = local_softirq_pending(); | 213 | pending = local_softirq_pending(); |
| 214 | account_system_vtime(current); | ||
| 215 | |||
| 216 | __local_bh_disable((unsigned long)__builtin_return_address(0)); | ||
| 217 | trace_softirq_enter(); | ||
| 83 | 218 | ||
| 84 | local_bh_disable(); | ||
| 85 | cpu = smp_processor_id(); | 219 | cpu = smp_processor_id(); |
| 86 | restart: | 220 | restart: |
| 87 | /* Reset the pending bitmask before enabling irqs */ | 221 | /* Reset the pending bitmask before enabling irqs */ |
| @@ -109,7 +243,10 @@ restart: | |||
| 109 | if (pending) | 243 | if (pending) |
| 110 | wakeup_softirqd(); | 244 | wakeup_softirqd(); |
| 111 | 245 | ||
| 112 | __local_bh_enable(); | 246 | trace_softirq_exit(); |
| 247 | |||
| 248 | account_system_vtime(current); | ||
| 249 | _local_bh_enable(); | ||
| 113 | } | 250 | } |
| 114 | 251 | ||
| 115 | #ifndef __ARCH_HAS_DO_SOFTIRQ | 252 | #ifndef __ARCH_HAS_DO_SOFTIRQ |
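Note that __do_softirq() pairs the new __local_bh_disable() with _local_bh_enable() rather than local_bh_enable(): the latter would try to process still-pending softirqs and recurse, whereas leftover work is deliberately handed to ksoftirqd. A user-space sketch of the surrounding restart policy (MAX_SOFTIRQ_RESTART is 10 in this file; the fake bitmask is illustrative):

#include <stdio.h>

#define MAX_SOFTIRQ_RESTART 10

static unsigned int fake_pending = 0x3; /* stand-in for local_softirq_pending() */

static unsigned int grab_pending(void)
{
        unsigned int p = fake_pending;
        fake_pending = 0;               /* reset before re-enabling irqs */
        return p;
}

int main(void)
{
        int max_restart = MAX_SOFTIRQ_RESTART;
        unsigned int pending = grab_pending();

        do {
                printf("handling pending mask %#x\n", pending);
                pending = grab_pending();       /* irqs may have queued more */
        } while (pending && --max_restart);

        if (pending)
                printf("still pending after %d rounds: wake ksoftirqd\n",
                       MAX_SOFTIRQ_RESTART);
        return 0;
}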
| @@ -136,23 +273,6 @@ EXPORT_SYMBOL(do_softirq); | |||
| 136 | 273 | ||
| 137 | #endif | 274 | #endif |
| 138 | 275 | ||
| 139 | void local_bh_enable(void) | ||
| 140 | { | ||
| 141 | WARN_ON(irqs_disabled()); | ||
| 142 | /* | ||
| 143 | * Keep preemption disabled until we are done with | ||
| 144 | * softirq processing: | ||
| 145 | */ | ||
| 146 | sub_preempt_count(SOFTIRQ_OFFSET - 1); | ||
| 147 | |||
| 148 | if (unlikely(!in_interrupt() && local_softirq_pending())) | ||
| 149 | do_softirq(); | ||
| 150 | |||
| 151 | dec_preempt_count(); | ||
| 152 | preempt_check_resched(); | ||
| 153 | } | ||
| 154 | EXPORT_SYMBOL(local_bh_enable); | ||
| 155 | |||
| 156 | #ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED | 276 | #ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED |
| 157 | # define invoke_softirq() __do_softirq() | 277 | # define invoke_softirq() __do_softirq() |
| 158 | #else | 278 | #else |
| @@ -165,6 +285,7 @@ EXPORT_SYMBOL(local_bh_enable); | |||
| 165 | void irq_exit(void) | 285 | void irq_exit(void) |
| 166 | { | 286 | { |
| 167 | account_system_vtime(current); | 287 | account_system_vtime(current); |
| 288 | trace_hardirq_exit(); | ||
| 168 | sub_preempt_count(IRQ_EXIT_OFFSET); | 289 | sub_preempt_count(IRQ_EXIT_OFFSET); |
| 169 | if (!in_interrupt() && local_softirq_pending()) | 290 | if (!in_interrupt() && local_softirq_pending()) |
| 170 | invoke_softirq(); | 291 | invoke_softirq(); |
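The new trace_hardirq_exit() sits before the preempt count drops, so the tracer still sees hardirq context while the bookkeeping unwinds. The existing !in_interrupt() test then decides whether softirqs may run: it is true only when no hardirq or softirq bits remain. A simplified model of that decision (offsets chosen to mirror hardirq.h; IRQ_EXIT_OFFSET's preemption subtlety is omitted):

#include <assert.h>

/* Simplified hardirq.h-style layout: softirq bits at 8, hardirq bits at 16. */
#define SOFTIRQ_OFFSET  (1UL << 8)
#define HARDIRQ_OFFSET  (1UL << 16)
#define IRQ_MASK        (~255UL)        /* any softirq or hardirq level set */

static unsigned long preempt_count;

#define in_interrupt()  (preempt_count & IRQ_MASK)

int main(void)
{
        /* hardirq arrived while a softirq was running */
        preempt_count = SOFTIRQ_OFFSET + HARDIRQ_OFFSET;
        preempt_count -= HARDIRQ_OFFSET;        /* irq_exit() */
        assert(in_interrupt());         /* still in softirq: skip invoke_softirq() */

        /* hardirq arrived in plain task context */
        preempt_count = HARDIRQ_OFFSET;
        preempt_count -= HARDIRQ_OFFSET;        /* irq_exit() */
        assert(!in_interrupt());        /* safe to run softirqs now */
        return 0;
}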
| @@ -208,8 +329,6 @@ void open_softirq(int nr, void (*action)(struct softirq_action*), void *data) | |||
| 208 | softirq_vec[nr].action = action; | 329 | softirq_vec[nr].action = action; |
| 209 | } | 330 | } |
| 210 | 331 | ||
| 211 | EXPORT_SYMBOL(open_softirq); | ||
| 212 | |||
| 213 | /* Tasklets */ | 332 | /* Tasklets */ |
| 214 | struct tasklet_head | 333 | struct tasklet_head |
| 215 | { | 334 | { |
| @@ -446,7 +565,7 @@ static void takeover_tasklets(unsigned int cpu) | |||
| 446 | } | 565 | } |
| 447 | #endif /* CONFIG_HOTPLUG_CPU */ | 566 | #endif /* CONFIG_HOTPLUG_CPU */ |
| 448 | 567 | ||
| 449 | static int cpu_callback(struct notifier_block *nfb, | 568 | static int __cpuinit cpu_callback(struct notifier_block *nfb, |
| 450 | unsigned long action, | 569 | unsigned long action, |
| 451 | void *hcpu) | 570 | void *hcpu) |
| 452 | { | 571 | { |
| @@ -470,6 +589,8 @@ static int cpu_callback(struct notifier_block *nfb, | |||
| 470 | break; | 589 | break; |
| 471 | #ifdef CONFIG_HOTPLUG_CPU | 590 | #ifdef CONFIG_HOTPLUG_CPU |
| 472 | case CPU_UP_CANCELED: | 591 | case CPU_UP_CANCELED: |
| 592 | if (!per_cpu(ksoftirqd, hotcpu)) | ||
| 593 | break; | ||
| 473 | /* Unbind so it can run. Fall thru. */ | 594 | /* Unbind so it can run. Fall thru. */ |
| 474 | kthread_bind(per_cpu(ksoftirqd, hotcpu), | 595 | kthread_bind(per_cpu(ksoftirqd, hotcpu), |
| 475 | any_online_cpu(cpu_online_map)); | 596 | any_online_cpu(cpu_online_map)); |
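The two added lines close a hotplug hole: CPU_UP_CANCELED is also delivered when CPU_UP_PREPARE itself failed, i.e. before kthread_create() ever filled in per_cpu(ksoftirqd, hotcpu), and kthread_bind() would then dereference a NULL task pointer. A reduced model of the callback ordering (types and names here are stand-ins):

#include <stdio.h>

struct task_struct { int dummy; };

/* stand-in for per_cpu(ksoftirqd, hotcpu) */
static struct task_struct *ksoftirqd;

static void cpu_up_canceled(void)
{
        if (!ksoftirqd) {       /* UP_PREPARE failed: nothing was created */
                puts("nothing to unbind");
                return;
        }
        puts("unbind and stop ksoftirqd");      /* the normal cleanup path */
}

int main(void)
{
        cpu_up_canceled();      /* UP_PREPARE never ran: guard avoids NULL deref */
        return 0;
}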
| @@ -484,14 +605,16 @@ static int cpu_callback(struct notifier_block *nfb, | |||
| 484 | return NOTIFY_OK; | 605 | return NOTIFY_OK; |
| 485 | } | 606 | } |
| 486 | 607 | ||
| 487 | static struct notifier_block cpu_nfb = { | 608 | static struct notifier_block __cpuinitdata cpu_nfb = { |
| 488 | .notifier_call = cpu_callback | 609 | .notifier_call = cpu_callback |
| 489 | }; | 610 | }; |
| 490 | 611 | ||
| 491 | __init int spawn_ksoftirqd(void) | 612 | __init int spawn_ksoftirqd(void) |
| 492 | { | 613 | { |
| 493 | void *cpu = (void *)(long)smp_processor_id(); | 614 | void *cpu = (void *)(long)smp_processor_id(); |
| 494 | cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu); | 615 | int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu); |
| 616 | |||
| 617 | BUG_ON(err == NOTIFY_BAD); | ||
| 495 | cpu_callback(&cpu_nfb, CPU_ONLINE, cpu); | 618 | cpu_callback(&cpu_nfb, CPU_ONLINE, cpu); |
| 496 | register_cpu_notifier(&cpu_nfb); | 619 | register_cpu_notifier(&cpu_nfb); |
| 497 | return 0; | 620 | return 0; |
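Finally, spawn_ksoftirqd() now refuses to boot past a failed first bring-up: if CPU_UP_PREPARE cannot create ksoftirqd for the boot CPU, deferred softirq handling would never work, so the notifier verdict is promoted to BUG_ON(). A sketch of the same check in isolation (constant values as in the 2.6-era include/linux/notifier.h):

#include <assert.h>

#define NOTIFY_OK        0x0001
#define NOTIFY_STOP_MASK 0x8000
#define NOTIFY_BAD       (NOTIFY_STOP_MASK | 0x0002)

/* stand-in for cpu_callback(..., CPU_UP_PREPARE, ...) */
static int up_prepare(int kthread_created)
{
        return kthread_created ? NOTIFY_OK : NOTIFY_BAD;
}

int main(void)
{
        int err = up_prepare(1);
        assert(err != NOTIFY_BAD);      /* mirrors BUG_ON(err == NOTIFY_BAD) */
        return 0;
}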
