Diffstat (limited to 'kernel/softirq.c')
 kernel/softirq.c | 157 +++++++++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 147 insertions(+), 10 deletions(-)
diff --git a/kernel/softirq.c b/kernel/softirq.c
index c506f266a6b9..e7c69a720d69 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -6,6 +6,8 @@
  * Distribute under GPLv2.
  *
  * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
+ *
+ * Remote softirq infrastructure is by Jens Axboe.
  */
 
 #include <linux/module.h>
@@ -46,7 +48,7 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
 EXPORT_SYMBOL(irq_stat);
 #endif
 
-static struct softirq_action softirq_vec[32] __cacheline_aligned_in_smp;
+static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
 
 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
 
@@ -205,7 +207,18 @@ restart:
 
 	do {
 		if (pending & 1) {
+			int prev_count = preempt_count();
+
 			h->action(h);
+
+			if (unlikely(prev_count != preempt_count())) {
+				printk(KERN_ERR "huh, entered softirq %td %p"
+				       "with preempt_count %08x,"
+				       " exited with %08x?\n", h - softirq_vec,
+				       h->action, prev_count, preempt_count());
+				preempt_count() = prev_count;
+			}
+
 			rcu_bh_qsctr_inc(cpu);
 		}
 		h++;
@@ -254,16 +267,13 @@ asmlinkage void do_softirq(void)
  */
 void irq_enter(void)
 {
-#ifdef CONFIG_NO_HZ
 	int cpu = smp_processor_id();
-	if (idle_cpu(cpu) && !in_interrupt())
-		tick_nohz_stop_idle(cpu);
-#endif
-	__irq_enter();
-#ifdef CONFIG_NO_HZ
-	if (idle_cpu(cpu))
-		tick_nohz_update_jiffies();
-#endif
+
+	if (idle_cpu(cpu) && !in_interrupt()) {
+		__irq_enter();
+		tick_check_idle(cpu);
+	} else
+		__irq_enter();
 }
 
 #ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
@@ -463,17 +473,144 @@ void tasklet_kill(struct tasklet_struct *t)
 
 EXPORT_SYMBOL(tasklet_kill);
 
+DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
+EXPORT_PER_CPU_SYMBOL(softirq_work_list);
+
+static void __local_trigger(struct call_single_data *cp, int softirq)
+{
+	struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]);
+
+	list_add_tail(&cp->list, head);
+
+	/* Trigger the softirq only if the list was previously empty. */
+	if (head->next == &cp->list)
+		raise_softirq_irqoff(softirq);
+}
+
+#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
+static void remote_softirq_receive(void *data)
+{
+	struct call_single_data *cp = data;
+	unsigned long flags;
+	int softirq;
+
+	softirq = cp->priv;
+
+	local_irq_save(flags);
+	__local_trigger(cp, softirq);
+	local_irq_restore(flags);
+}
+
+static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
+{
+	if (cpu_online(cpu)) {
+		cp->func = remote_softirq_receive;
+		cp->info = cp;
+		cp->flags = 0;
+		cp->priv = softirq;
+
+		__smp_call_function_single(cpu, cp);
+		return 0;
+	}
+	return 1;
+}
+#else /* CONFIG_USE_GENERIC_SMP_HELPERS */
+static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
+{
+	return 1;
+}
+#endif
+
+/**
+ * __send_remote_softirq - try to schedule softirq work on a remote cpu
+ * @cp: private SMP call function data area
+ * @cpu: the remote cpu
+ * @this_cpu: the currently executing cpu
+ * @softirq: the softirq for the work
+ *
+ * Attempt to schedule softirq work on a remote cpu.  If this cannot be
+ * done, the work is instead queued up on the local cpu.
+ *
+ * Interrupts must be disabled.
+ */
+void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq)
+{
+	if (cpu == this_cpu || __try_remote_softirq(cp, cpu, softirq))
+		__local_trigger(cp, softirq);
+}
+EXPORT_SYMBOL(__send_remote_softirq);
+
+/**
+ * send_remote_softirq - try to schedule softirq work on a remote cpu
+ * @cp: private SMP call function data area
+ * @cpu: the remote cpu
+ * @softirq: the softirq for the work
+ *
+ * Like __send_remote_softirq except that disabling interrupts and
+ * computing the current cpu is done for the caller.
+ */
+void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
+{
+	unsigned long flags;
+	int this_cpu;
+
+	local_irq_save(flags);
+	this_cpu = smp_processor_id();
+	__send_remote_softirq(cp, cpu, this_cpu, softirq);
+	local_irq_restore(flags);
+}
+EXPORT_SYMBOL(send_remote_softirq);
+
+static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
+					       unsigned long action, void *hcpu)
+{
+	/*
+	 * If a CPU goes away, splice its entries to the current CPU
+	 * and trigger a run of the softirq
+	 */
+	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
+		int cpu = (unsigned long) hcpu;
+		int i;
+
+		local_irq_disable();
+		for (i = 0; i < NR_SOFTIRQS; i++) {
+			struct list_head *head = &per_cpu(softirq_work_list[i], cpu);
+			struct list_head *local_head;
+
+			if (list_empty(head))
+				continue;
+
+			local_head = &__get_cpu_var(softirq_work_list[i]);
+			list_splice_init(head, local_head);
+			raise_softirq_irqoff(i);
+		}
+		local_irq_enable();
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
+	.notifier_call = remote_softirq_cpu_notify,
+};
+
 void __init softirq_init(void)
 {
 	int cpu;
 
 	for_each_possible_cpu(cpu) {
+		int i;
+
 		per_cpu(tasklet_vec, cpu).tail =
 			&per_cpu(tasklet_vec, cpu).head;
 		per_cpu(tasklet_hi_vec, cpu).tail =
 			&per_cpu(tasklet_hi_vec, cpu).head;
+		for (i = 0; i < NR_SOFTIRQS; i++)
+			INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu));
 	}
 
+	register_hotcpu_notifier(&remote_softirq_cpu_notifier);
+
 	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
 	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
 }
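
The exported interface is easiest to read from the consumer side: a subsystem
embeds a struct call_single_data in its per-work object, calls
send_remote_softirq() to queue the object on a chosen CPU's
softirq_work_list[nr], and drains that list from its own softirq handler.
The following is a minimal sketch of that usage, not part of this commit;
MY_SOFTIRQ, struct my_work, my_finish() and my_complete() are hypothetical
names introduced here for illustration.

/*
 * Hypothetical consumer of send_remote_softirq(): completes work on
 * the CPU that originated it.  MY_SOFTIRQ, struct my_work, my_finish()
 * and my_complete() are illustrative names, not part of this commit.
 */
struct my_work {
	struct call_single_data csd;	/* queued on softirq_work_list */
	int origin_cpu;			/* CPU that submitted the work */
	/* ... payload ... */
};

/* Softirq handler for MY_SOFTIRQ: drain this CPU's work list. */
static void my_softirq_action(struct softirq_action *h)
{
	struct list_head *head, local_list;

	local_irq_disable();
	head = &__get_cpu_var(softirq_work_list[MY_SOFTIRQ]);
	list_replace_init(head, &local_list);
	local_irq_enable();

	while (!list_empty(&local_list)) {
		struct call_single_data *cp;

		cp = list_entry(local_list.next,
				struct call_single_data, list);
		list_del_init(&cp->list);
		my_complete(container_of(cp, struct my_work, csd));
	}
}

/* Hand finished work back to the CPU that submitted it. */
static void my_finish(struct my_work *w)
{
	send_remote_softirq(&w->csd, w->origin_cpu, MY_SOFTIRQ);
}

Note that send_remote_softirq() falls back to __local_trigger() when the
target CPU is the current one or cannot be reached, so the handler above runs
in either case, and the CPU-hotplug notifier added by this patch splices any
entries still queued on a dying CPU onto the current CPU and re-raises the
softirq, so queued work is not lost.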