Diffstat (limited to 'kernel/softirq.c')
-rw-r--r--	kernel/softirq.c	129
1 file changed, 129 insertions(+), 0 deletions(-)
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 37d67aa2d56f..83ba21a13bd4 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -6,6 +6,8 @@
  * Distribute under GPLv2.
  *
  * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
+ *
+ * Remote softirq infrastructure is by Jens Axboe.
  */
 
 #include <linux/module.h>
@@ -474,17 +476,144 @@ void tasklet_kill(struct tasklet_struct *t)
 
 EXPORT_SYMBOL(tasklet_kill);
 
+DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
+EXPORT_PER_CPU_SYMBOL(softirq_work_list);
+
+static void __local_trigger(struct call_single_data *cp, int softirq)
+{
+	struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]);
+
+	list_add_tail(&cp->list, head);
+
+	/* Trigger the softirq only if the list was previously empty. */
+	if (head->next == &cp->list)
+		raise_softirq_irqoff(softirq);
+}
+
+#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
+static void remote_softirq_receive(void *data)
+{
+	struct call_single_data *cp = data;
+	unsigned long flags;
+	int softirq;
+
+	softirq = cp->priv;
+
+	local_irq_save(flags);
+	__local_trigger(cp, softirq);
+	local_irq_restore(flags);
+}
+
+static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
+{
+	if (cpu_online(cpu)) {
+		cp->func = remote_softirq_receive;
+		cp->info = cp;
+		cp->flags = 0;
+		cp->priv = softirq;
+
+		__smp_call_function_single(cpu, cp);
+		return 0;
+	}
+	return 1;
+}
+#else /* CONFIG_USE_GENERIC_SMP_HELPERS */
+static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
+{
+	return 1;
+}
+#endif
+
+/**
+ * __send_remote_softirq - try to schedule softirq work on a remote cpu
+ * @cp: private SMP call function data area
+ * @cpu: the remote cpu
+ * @this_cpu: the currently executing cpu
+ * @softirq: the softirq for the work
+ *
+ * Attempt to schedule softirq work on a remote cpu.  If this cannot be
+ * done, the work is instead queued up on the local cpu.
+ *
+ * Interrupts must be disabled.
+ */
+void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq)
+{
+	if (cpu == this_cpu || __try_remote_softirq(cp, cpu, softirq))
+		__local_trigger(cp, softirq);
+}
+EXPORT_SYMBOL(__send_remote_softirq);
+
+/**
+ * send_remote_softirq - try to schedule softirq work on a remote cpu
+ * @cp: private SMP call function data area
+ * @cpu: the remote cpu
+ * @softirq: the softirq for the work
+ *
+ * Like __send_remote_softirq except that disabling interrupts and
+ * computing the current cpu is done for the caller.
+ */
+void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
+{
+	unsigned long flags;
+	int this_cpu;
+
+	local_irq_save(flags);
+	this_cpu = smp_processor_id();
+	__send_remote_softirq(cp, cpu, this_cpu, softirq);
+	local_irq_restore(flags);
+}
+EXPORT_SYMBOL(send_remote_softirq);
+
+static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
+					       unsigned long action, void *hcpu)
+{
+	/*
+	 * If a CPU goes away, splice its entries to the current CPU
+	 * and trigger a run of the softirq
+	 */
+	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
+		int cpu = (unsigned long) hcpu;
+		int i;
+
+		local_irq_disable();
+		for (i = 0; i < NR_SOFTIRQS; i++) {
+			struct list_head *head = &per_cpu(softirq_work_list[i], cpu);
+			struct list_head *local_head;
+
+			if (list_empty(head))
+				continue;
+
+			local_head = &__get_cpu_var(softirq_work_list[i]);
+			list_splice_init(head, local_head);
+			raise_softirq_irqoff(i);
+		}
+		local_irq_enable();
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
+	.notifier_call = remote_softirq_cpu_notify,
+};
+
 void __init softirq_init(void)
 {
 	int cpu;
 
 	for_each_possible_cpu(cpu) {
+		int i;
+
 		per_cpu(tasklet_vec, cpu).tail =
 			&per_cpu(tasklet_vec, cpu).head;
 		per_cpu(tasklet_hi_vec, cpu).tail =
 			&per_cpu(tasklet_hi_vec, cpu).head;
+		for (i = 0; i < NR_SOFTIRQS; i++)
+			INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu));
 	}
 
+	register_hotcpu_notifier(&remote_softirq_cpu_notifier);
+
 	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
 	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
 }
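
For readers of the patch, a minimal usage sketch follows, kept outside the diff because it is not part of the change itself. It assumes a hypothetical softirq: struct my_work, my_complete_on(), my_softirq_action() and MY_SOFTIRQ are invented names, and MY_SOFTIRQ would need its own entry in the softirq enum in <linux/interrupt.h>, which this patch does not add. The producer embeds a struct call_single_data in its work item and hands it to a chosen CPU via send_remote_softirq(); that CPU's softirq handler then drains its slot in the per-CPU softirq_work_list.

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/percpu.h>
#include <linux/smp.h>

/* Hypothetical work item; csd carries the list linkage and IPI data. */
struct my_work {
	struct call_single_data csd;
	int result;
};

/* Producer side: queue completion work for 'cpu' (e.g. the submitter). */
static void my_complete_on(struct my_work *w, int cpu)
{
	/* Falls back to the local CPU if 'cpu' is this CPU or offline. */
	send_remote_softirq(&w->csd, cpu, MY_SOFTIRQ);
}

/* Consumer side: the softirq handler drains this CPU's list. */
static void my_softirq_action(struct softirq_action *h)
{
	struct list_head local, *head;

	local_irq_disable();
	head = &__get_cpu_var(softirq_work_list[MY_SOFTIRQ]);
	list_replace_init(head, &local);
	local_irq_enable();

	while (!list_empty(&local)) {
		struct my_work *w =
			list_entry(local.next, struct my_work, csd.list);

		list_del(&w->csd.list);
		/* ... consume w->result here ... */
	}
}

/* Registered once at init time:
 *	open_softirq(MY_SOFTIRQ, my_softirq_action);
 */

Two design points in the patch itself are worth noting. First, __local_trigger() raises the softirq only when the list goes from empty to non-empty, so a burst of items queued to one CPU costs a single raise_softirq_irqoff() rather than one per item. Second, the CPU_DEAD notifier splices any work still queued on a departing CPU onto the CPU running the notifier and re-raises the softirq there, so queued items cannot be lost across a hot-unplug.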