Diffstat (limited to 'kernel/softirq.c')

-rw-r--r--   kernel/softirq.c   176

1 file changed, 150 insertions, 26 deletions

diff --git a/kernel/softirq.c b/kernel/softirq.c
index c506f266a6b9..466e75ce271a 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -6,6 +6,8 @@
  * Distribute under GPLv2.
  *
  * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
+ *
+ * Remote softirq infrastructure is by Jens Axboe.
  */
 
 #include <linux/module.h>
@@ -46,7 +48,7 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
 EXPORT_SYMBOL(irq_stat);
 #endif
 
-static struct softirq_action softirq_vec[32] __cacheline_aligned_in_smp;
+static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
 
 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
 
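Sizing the vector with the NR_SOFTIRQS enumerator instead of a hard-coded 32
means the table automatically tracks the softirqs that are actually defined.
A minimal standalone model of that sizing idiom (the enum here is abbreviated
for illustration, not the kernel's full list from include/linux/interrupt.h):

    #include <stdio.h>

    /* Abbreviated stand-in for the kernel's softirq enum; the real list
     * ends with NR_SOFTIRQS as the count of the entries above it. */
    enum {
            HI_SOFTIRQ = 0,
            TIMER_SOFTIRQ,
            NET_TX_SOFTIRQ,
            NET_RX_SOFTIRQ,
            TASKLET_SOFTIRQ,
            NR_SOFTIRQS             /* always last */
    };

    struct softirq_action {
            void (*action)(struct softirq_action *);
    };

    /* Grows (or shrinks) with the enum above; no magic constant. */
    static struct softirq_action softirq_vec[NR_SOFTIRQS];

    int main(void)
    {
            printf("softirq_vec has %zu slots\n",
                   sizeof(softirq_vec) / sizeof(softirq_vec[0]));
            return 0;
    }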
@@ -100,20 +102,6 @@ void local_bh_disable(void)
 
 EXPORT_SYMBOL(local_bh_disable);
 
-void __local_bh_enable(void)
-{
-	WARN_ON_ONCE(in_irq());
-
-	/*
-	 * softirqs should never be enabled by __local_bh_enable(),
-	 * it always nests inside local_bh_enable() sections:
-	 */
-	WARN_ON_ONCE(softirq_count() == SOFTIRQ_OFFSET);
-
-	sub_preempt_count(SOFTIRQ_OFFSET);
-}
-EXPORT_SYMBOL_GPL(__local_bh_enable);
-
 /*
  * Special-case - softirqs can safely be enabled in
  * cond_resched_softirq(), or by __do_softirq(),
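The deleted __local_bh_enable() dropped one level of softirq-disable nesting
by subtracting SOFTIRQ_OFFSET from preempt_count(). A hedged, userspace-only
model of that bookkeeping (shift, offset and mask mirror the layout in
include/linux/hardirq.h, which this diff does not show):

    #include <assert.h>
    #include <stdio.h>

    #define SOFTIRQ_SHIFT   8
    #define SOFTIRQ_OFFSET  (1UL << SOFTIRQ_SHIFT)
    #define SOFTIRQ_MASK    (0xffUL << SOFTIRQ_SHIFT)

    static unsigned long preempt_count;     /* stand-in for the real counter */

    static unsigned long softirq_count(void)
    {
            return preempt_count & SOFTIRQ_MASK;
    }

    static void local_bh_disable(void)
    {
            preempt_count += SOFTIRQ_OFFSET;
    }

    static void local_bh_enable(void)
    {
            /* Enabling while not disabled would underflow the field. */
            assert(softirq_count() != 0);
            preempt_count -= SOFTIRQ_OFFSET;
    }

    int main(void)
    {
            local_bh_disable();
            local_bh_disable();     /* sections nest */
            printf("nesting depth %lu\n", softirq_count() >> SOFTIRQ_SHIFT);
            local_bh_enable();
            local_bh_enable();
            return 0;
    }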
@@ -205,7 +193,18 @@ restart:
 
 	do {
 		if (pending & 1) {
+			int prev_count = preempt_count();
+
 			h->action(h);
+
+			if (unlikely(prev_count != preempt_count())) {
+				printk(KERN_ERR "huh, entered softirq %td %p"
+				       "with preempt_count %08x,"
+				       " exited with %08x?\n", h - softirq_vec,
+				       h->action, prev_count, preempt_count());
+				preempt_count() = prev_count;
+			}
+
 			rcu_bh_qsctr_inc(cpu);
 		}
 		h++;
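The new check snapshots preempt_count() around every handler invocation, then
reports and repairs any imbalance, so one buggy handler (say, one that returns
with a spinlock still held) cannot silently disable preemption for the rest of
the system. A standalone model of the detect-and-restore pattern (the counter
and the handler are stand-ins, not kernel API):

    #include <stdio.h>

    static unsigned int preempt_count;      /* stand-in for the per-task counter */

    static void buggy_handler(void)
    {
            preempt_count++;        /* models returning with a lock still held */
    }

    static void run_handler(void (*action)(void), const char *name)
    {
            unsigned int prev_count = preempt_count;

            action();

            if (prev_count != preempt_count) {
                    fprintf(stderr,
                            "huh, entered %s with preempt_count %08x, exited with %08x?\n",
                            name, prev_count, preempt_count);
                    preempt_count = prev_count;     /* repair, as the kernel does */
            }
    }

    int main(void)
    {
            run_handler(buggy_handler, "buggy_handler");
            return 0;
    }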
@@ -254,16 +253,14 @@ asmlinkage void do_softirq(void)
  */
 void irq_enter(void)
 {
-#ifdef CONFIG_NO_HZ
 	int cpu = smp_processor_id();
-	if (idle_cpu(cpu) && !in_interrupt())
-		tick_nohz_stop_idle(cpu);
-#endif
-	__irq_enter();
-#ifdef CONFIG_NO_HZ
-	if (idle_cpu(cpu))
-		tick_nohz_update_jiffies();
-#endif
+
+	rcu_irq_enter();
+	if (idle_cpu(cpu) && !in_interrupt()) {
+		__irq_enter();
+		tick_check_idle(cpu);
+	} else
+		__irq_enter();
 }
 
 #ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
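In the reworked irq_enter(), rcu_irq_enter() is no longer hidden behind
CONFIG_NO_HZ, and when an interrupt hits an idle CPU the hard IRQ is accounted
via __irq_enter() before tick_check_idle() lets the tick code catch up, so the
tick path already sees interrupt context. A standalone model of that ordering
(every name below is a stand-in for the corresponding kernel helper):

    #include <stdbool.h>
    #include <stdio.h>

    static unsigned int hardirq_depth;
    static bool cpu_is_idle = true;

    static bool in_interrupt(void) { return hardirq_depth != 0; }
    static void __irq_enter(void)  { hardirq_depth++; }

    static void tick_check_idle(void)
    {
            /* May rely on interrupt context already being accounted for. */
            printf("tick catch-up, in_interrupt()=%d\n", in_interrupt());
    }

    static void irq_enter(void)
    {
            if (cpu_is_idle && !in_interrupt()) {
                    __irq_enter();          /* account the hard IRQ first... */
                    tick_check_idle();      /* ...then let the tick catch up */
            } else
                    __irq_enter();
    }

    int main(void)
    {
            irq_enter();
            return 0;
    }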
@@ -285,9 +282,9 @@ void irq_exit(void)
 
 #ifdef CONFIG_NO_HZ
 	/* Make sure that timer wheel updates are propagated */
-	if (!in_interrupt() && idle_cpu(smp_processor_id()) && !need_resched())
-		tick_nohz_stop_sched_tick(0);
 	rcu_irq_exit();
+	if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
+		tick_nohz_stop_sched_tick(0);
 #endif
 	preempt_enable_no_resched();
 }
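On the exit side, rcu_irq_exit() now runs before the decision to stop the
tick, and the rewritten condition tests idle_cpu() first; with &&
short-circuiting, the common non-idle case presumably bails after the first
test. A standalone model of that evaluation order (the three predicates are
stand-ins):

    #include <stdbool.h>
    #include <stdio.h>

    static int calls;

    static bool idle_cpu(void)     { calls++; return false; } /* usually false */
    static bool in_interrupt(void) { calls++; return false; }
    static bool need_resched(void) { calls++; return false; }

    int main(void)
    {
            /* With idle_cpu() first, the busy case stops after one test. */
            if (idle_cpu() && !in_interrupt() && !need_resched())
                    puts("stop the tick");
            printf("tests evaluated: %d\n", calls);
            return 0;
    }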
@@ -463,17 +460,144 @@ void tasklet_kill(struct tasklet_struct *t)
 
 EXPORT_SYMBOL(tasklet_kill);
 
+DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
+EXPORT_PER_CPU_SYMBOL(softirq_work_list);
+
+static void __local_trigger(struct call_single_data *cp, int softirq)
+{
+	struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]);
+
+	list_add_tail(&cp->list, head);
+
+	/* Trigger the softirq only if the list was previously empty. */
+	if (head->next == &cp->list)
+		raise_softirq_irqoff(softirq);
+}
+
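The empty-check in __local_trigger() coalesces raises: the softirq is raised
only on the empty-to-non-empty transition, and items queued while it is
already pending are handled by the run that is already scheduled. A standalone
model of that raise-on-first-insert pattern (the list helpers and
raise_softirq() are stand-ins):

    #include <stdio.h>

    /* Minimal circular list, in the style of the kernel's list.h. */
    struct list_head { struct list_head *next, *prev; };

    static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

    static void list_add_tail(struct list_head *new, struct list_head *head)
    {
            new->prev = head->prev;
            new->next = head;
            head->prev->next = new;
            head->prev = new;
    }

    static int raises;      /* counts how often the "softirq" is raised */
    static void raise_softirq(void) { raises++; }

    static struct list_head work_list;

    static void local_trigger(struct list_head *item)
    {
            list_add_tail(item, &work_list);

            /* Raise only on the empty -> non-empty transition. */
            if (work_list.next == item)
                    raise_softirq();
    }

    int main(void)
    {
            struct list_head a, b, c;

            INIT_LIST_HEAD(&work_list);
            local_trigger(&a);
            local_trigger(&b);      /* coalesced: already pending */
            local_trigger(&c);
            printf("3 items queued, softirq raised %d time(s)\n", raises);
            return 0;
    }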
+#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
+static void remote_softirq_receive(void *data)
+{
+	struct call_single_data *cp = data;
+	unsigned long flags;
+	int softirq;
+
+	softirq = cp->priv;
+
+	local_irq_save(flags);
+	__local_trigger(cp, softirq);
+	local_irq_restore(flags);
+}
+
+static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
+{
+	if (cpu_online(cpu)) {
+		cp->func = remote_softirq_receive;
+		cp->info = cp;
+		cp->flags = 0;
+		cp->priv = softirq;
+
+		__smp_call_function_single(cpu, cp);
+		return 0;
+	}
+	return 1;
+}
+#else /* CONFIG_USE_GENERIC_SMP_HELPERS */
+static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
+{
+	return 1;
+}
+#endif
+
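Note how the descriptor is self-referential: cp->info points back at cp so the
IPI callback can recover the whole call_single_data, and cp->priv smuggles the
softirq number across the IPI. A standalone model of that convention (struct
layout abbreviated; delivery is simulated by a direct call):

    #include <stdio.h>

    /* Abbreviated stand-in for struct call_single_data. */
    struct call_single_data {
            void (*func)(void *info);
            void *info;
            unsigned int priv;
    };

    static void remote_receive(void *info)
    {
            struct call_single_data *cp = info;

            printf("received softirq %u via cp=%p\n", cp->priv, (void *)cp);
    }

    int main(void)
    {
            struct call_single_data cp;

            cp.func = remote_receive;
            cp.info = &cp;          /* handler recovers the full descriptor */
            cp.priv = 3;

            /* Models the IPI delivery done by __smp_call_function_single(). */
            cp.func(cp.info);
            return 0;
    }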
+/**
+ * __send_remote_softirq - try to schedule softirq work on a remote cpu
+ * @cp: private SMP call function data area
+ * @cpu: the remote cpu
+ * @this_cpu: the currently executing cpu
+ * @softirq: the softirq for the work
+ *
+ * Attempt to schedule softirq work on a remote cpu.  If this cannot be
+ * done, the work is instead queued up on the local cpu.
+ *
+ * Interrupts must be disabled.
+ */
+void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq)
+{
+	if (cpu == this_cpu || __try_remote_softirq(cp, cpu, softirq))
+		__local_trigger(cp, softirq);
+}
+EXPORT_SYMBOL(__send_remote_softirq);
+
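The short-circuit in __send_remote_softirq() reads: queue locally when the
target is this CPU, or when remote delivery fails (__try_remote_softirq()
returns 1). A standalone model of that fallback flow (the online map and both
helpers are stand-ins):

    #include <stdio.h>

    /* Stand-in: 0 when delivery to "cpu" succeeded, 1 when it could not
     * be delivered (e.g. the cpu is offline), as in the kernel. */
    static int try_remote(int cpu)
    {
            if (cpu != 1)           /* pretend only cpu 1 is online */
                    return 1;
            printf("delivered to cpu %d\n", cpu);
            return 0;
    }

    static void local_trigger(void)
    {
            puts("queued on the local cpu");
    }

    static void send(int cpu, int this_cpu)
    {
            /* Queue locally if target is this cpu or remote delivery fails. */
            if (cpu == this_cpu || try_remote(cpu))
                    local_trigger();
    }

    int main(void)
    {
            send(0, 0);     /* self: stays local, try_remote() is skipped */
            send(1, 0);     /* online remote cpu: delivered */
            send(2, 0);     /* offline remote cpu: falls back to local */
            return 0;
    }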
+/**
+ * send_remote_softirq - try to schedule softirq work on a remote cpu
+ * @cp: private SMP call function data area
+ * @cpu: the remote cpu
+ * @softirq: the softirq for the work
+ *
+ * Like __send_remote_softirq except that disabling interrupts and
+ * computing the current cpu is done for the caller.
+ */
+void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
+{
+	unsigned long flags;
+	int this_cpu;
+
+	local_irq_save(flags);
+	this_cpu = smp_processor_id();
+	__send_remote_softirq(cp, cpu, this_cpu, softirq);
+	local_irq_restore(flags);
+}
+EXPORT_SYMBOL(send_remote_softirq);
+
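A hedged, kernel-style sketch of a hypothetical caller of this new API
(MY_SOFTIRQ, struct my_request and my_request_done() are invented for
illustration; the patch itself adds no users):

    /* Hypothetical caller: run completion work for a request on the CPU
     * that submitted it.  The call_single_data embedded in the request
     * must not be reused until the softirq handler has consumed it. */
    struct my_request {
            struct call_single_data csd;    /* lands on softirq_work_list */
            int submit_cpu;
            /* ... */
    };

    static void my_request_done(struct my_request *req)
    {
            /* Queues req->csd on req->submit_cpu and raises the (invented)
             * MY_SOFTIRQ there, falling back to the local CPU if that CPU
             * is offline. */
            send_remote_softirq(&req->csd, req->submit_cpu, MY_SOFTIRQ);
    }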
+static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
+					       unsigned long action, void *hcpu)
+{
+	/*
+	 * If a CPU goes away, splice its entries to the current CPU
+	 * and trigger a run of the softirq
+	 */
+	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
+		int cpu = (unsigned long) hcpu;
+		int i;
+
+		local_irq_disable();
+		for (i = 0; i < NR_SOFTIRQS; i++) {
+			struct list_head *head = &per_cpu(softirq_work_list[i], cpu);
+			struct list_head *local_head;
+
+			if (list_empty(head))
+				continue;
+
+			local_head = &__get_cpu_var(softirq_work_list[i]);
+			list_splice_init(head, local_head);
+			raise_softirq_irqoff(i);
+		}
+		local_irq_enable();
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
+	.notifier_call = remote_softirq_cpu_notify,
+};
+
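When a CPU dies, its still-queued entries are spliced onto the current CPU's
lists and the softirqs are re-raised, so no queued work is lost across
hotplug. A standalone model of the splice step (a simplified tail-splice in
the spirit of list_splice_init(); the kernel's version splices at the front):

    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
    static int list_empty(const struct list_head *h) { return h->next == h; }

    static void list_add_tail(struct list_head *n, struct list_head *h)
    {
            n->prev = h->prev;
            n->next = h;
            h->prev->next = n;
            h->prev = n;
    }

    /* Move every entry of "list" onto "head" and leave "list" empty. */
    static void list_splice_init(struct list_head *list, struct list_head *head)
    {
            if (list_empty(list))
                    return;
            list->next->prev = head->prev;
            head->prev->next = list->next;
            list->prev->next = head;
            head->prev = list->prev;
            INIT_LIST_HEAD(list);
    }

    int main(void)
    {
            struct list_head dead_cpu, local, a, b;
            int n = 0;

            INIT_LIST_HEAD(&dead_cpu);
            INIT_LIST_HEAD(&local);
            list_add_tail(&a, &dead_cpu);
            list_add_tail(&b, &dead_cpu);

            list_splice_init(&dead_cpu, &local);    /* adopt orphaned work */

            for (struct list_head *p = local.next; p != &local; p = p->next)
                    n++;
            printf("adopted %d entries; dead list empty=%d\n",
                   n, list_empty(&dead_cpu));
            return 0;
    }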
 void __init softirq_init(void)
 {
 	int cpu;
 
 	for_each_possible_cpu(cpu) {
+		int i;
+
 		per_cpu(tasklet_vec, cpu).tail =
 			&per_cpu(tasklet_vec, cpu).head;
 		per_cpu(tasklet_hi_vec, cpu).tail =
 			&per_cpu(tasklet_hi_vec, cpu).head;
+		for (i = 0; i < NR_SOFTIRQS; i++)
+			INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu));
 	}
 
+	register_hotcpu_notifier(&remote_softirq_cpu_notifier);
+
 	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
 	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
 }
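The patch adds the producers and the per-CPU lists but no consumer: the
softirq action that owns a given entry is expected to drain its own
softirq_work_list. A hedged, kernel-style sketch of what such a handler might
look like (MY_SOFTIRQ and handle_one() are invented for illustration):

    static void my_softirq_action(struct softirq_action *h)
    {
            struct list_head *head =
                    &__get_cpu_var(softirq_work_list[MY_SOFTIRQ]);

            local_irq_disable();
            while (!list_empty(head)) {
                    struct call_single_data *cp =
                            list_entry(head->next, struct call_single_data, list);

                    list_del(&cp->list);
                    local_irq_enable();
                    handle_one(cp);         /* invented per-item handler */
                    local_irq_disable();
            }
            local_irq_enable();
    }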