author     Christoph Hellwig <hch@infradead.org>           2013-11-14 17:32:06 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-11-14 19:32:22 -0500
commit     fc21c0cff2f425891b28ff6fb6b03b325c977428
tree       920565aba013c83db09bc677c15d17280eee0707 /kernel
parent     c853b167e6ec1f25023cfc58ba2f43f9f6f5b49b
revert "softirq: Add support for triggering softirq work on softirqs"
This commit was incomplete: the code to remove items from the per-cpu
lists was missing, and the infrastructure never acquired a user in the
five years it has been in the tree. We're going to implement what it
seems to try to achieve in a simpler way, and this code is in the way
of doing so.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Cc: Jan Kara <jack@suse.cz>
Cc: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
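
For context, the interface being removed was meant to be driven as in the
following caller sketch. This is illustrative only, not code from the tree:
struct my_request and my_complete_on are hypothetical names, and
BLOCK_SOFTIRQ merely stands in for whichever softirq the caller wants run.

        /* Hypothetical caller sketch -- not code from the tree.  A
         * subsystem embeds the call_single_data in its per-request
         * state and asks for a softirq to be run on a chosen CPU;
         * interrupt disabling and the offline-CPU fallback are
         * handled inside send_remote_softirq(). */
        struct my_request {
                struct call_single_data csd;    /* owned by the softirq code */
                /* ... subsystem-private fields ... */
        };

        static void my_complete_on(struct my_request *req, int cpu)
        {
                /* Queues on @cpu if it is online, else on the local CPU. */
                send_remote_softirq(&req->csd, cpu, BLOCK_SOFTIRQ);
        }

As the commit message notes, the matching dequeue side -- a softirq handler
walking the per-cpu softirq_work_list -- was never merged, which is why the
infrastructure could never gain a real user.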
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/softirq.c   131
1 file changed, 0 insertions, 131 deletions
diff --git a/kernel/softirq.c b/kernel/softirq.c
index b24988353458..11025ccc06dd 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -6,8 +6,6 @@
  * Distribute under GPLv2.
  *
  * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
- *
- * Remote softirq infrastructure is by Jens Axboe.
  */
 
 #include <linux/export.h>
@@ -627,146 +625,17 @@ void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
 }
 EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
 
-/*
- * Remote softirq bits
- */
-
-DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
-EXPORT_PER_CPU_SYMBOL(softirq_work_list);
-
-static void __local_trigger(struct call_single_data *cp, int softirq)
-{
-        struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]);
-
-        list_add_tail(&cp->list, head);
-
-        /* Trigger the softirq only if the list was previously empty. */
-        if (head->next == &cp->list)
-                raise_softirq_irqoff(softirq);
-}
-
-#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
-static void remote_softirq_receive(void *data)
-{
-        struct call_single_data *cp = data;
-        unsigned long flags;
-        int softirq;
-
-        softirq = *(int *)cp->info;
-        local_irq_save(flags);
-        __local_trigger(cp, softirq);
-        local_irq_restore(flags);
-}
-
-static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
-{
-        if (cpu_online(cpu)) {
-                cp->func = remote_softirq_receive;
-                cp->info = &softirq;
-                cp->flags = 0;
-
-                __smp_call_function_single(cpu, cp, 0);
-                return 0;
-        }
-        return 1;
-}
-#else /* CONFIG_USE_GENERIC_SMP_HELPERS */
-static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
-{
-        return 1;
-}
-#endif
-
-/**
- * __send_remote_softirq - try to schedule softirq work on a remote cpu
- * @cp: private SMP call function data area
- * @cpu: the remote cpu
- * @this_cpu: the currently executing cpu
- * @softirq: the softirq for the work
- *
- * Attempt to schedule softirq work on a remote cpu.  If this cannot be
- * done, the work is instead queued up on the local cpu.
- *
- * Interrupts must be disabled.
- */
-void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq)
-{
-        if (cpu == this_cpu || __try_remote_softirq(cp, cpu, softirq))
-                __local_trigger(cp, softirq);
-}
-EXPORT_SYMBOL(__send_remote_softirq);
-
-/**
- * send_remote_softirq - try to schedule softirq work on a remote cpu
- * @cp: private SMP call function data area
- * @cpu: the remote cpu
- * @softirq: the softirq for the work
- *
- * Like __send_remote_softirq except that disabling interrupts and
- * computing the current cpu is done for the caller.
- */
-void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
-{
-        unsigned long flags;
-        int this_cpu;
-
-        local_irq_save(flags);
-        this_cpu = smp_processor_id();
-        __send_remote_softirq(cp, cpu, this_cpu, softirq);
-        local_irq_restore(flags);
-}
-EXPORT_SYMBOL(send_remote_softirq);
-
-static int remote_softirq_cpu_notify(struct notifier_block *self,
-                                     unsigned long action, void *hcpu)
-{
-        /*
-         * If a CPU goes away, splice its entries to the current CPU
-         * and trigger a run of the softirq
-         */
-        if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
-                int cpu = (unsigned long) hcpu;
-                int i;
-
-                local_irq_disable();
-                for (i = 0; i < NR_SOFTIRQS; i++) {
-                        struct list_head *head = &per_cpu(softirq_work_list[i], cpu);
-                        struct list_head *local_head;
-
-                        if (list_empty(head))
-                                continue;
-
-                        local_head = &__get_cpu_var(softirq_work_list[i]);
-                        list_splice_init(head, local_head);
-                        raise_softirq_irqoff(i);
-                }
-                local_irq_enable();
-        }
-
-        return NOTIFY_OK;
-}
-
-static struct notifier_block remote_softirq_cpu_notifier = {
-        .notifier_call = remote_softirq_cpu_notify,
-};
-
 void __init softirq_init(void)
 {
         int cpu;
 
         for_each_possible_cpu(cpu) {
-                int i;
-
                 per_cpu(tasklet_vec, cpu).tail =
                         &per_cpu(tasklet_vec, cpu).head;
                 per_cpu(tasklet_hi_vec, cpu).tail =
                         &per_cpu(tasklet_hi_vec, cpu).head;
-                for (i = 0; i < NR_SOFTIRQS; i++)
-                        INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu));
         }
 
-        register_hotcpu_notifier(&remote_softirq_cpu_notifier);
-
         open_softirq(TASKLET_SOFTIRQ, tasklet_action);
         open_softirq(HI_SOFTIRQ, tasklet_hi_action);
 }
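
One detail worth noting from the removed __local_trigger(): the softirq is
raised only when the entry just queued is the first on the list, so a raise
that is already pending is never duplicated. A minimal, self-contained
user-space sketch of that "signal only on the empty-to-non-empty transition"
idiom follows; it is illustrative only, not kernel code, and uses a LIFO
push where the kernel list is FIFO.

        #include <stdbool.h>
        #include <stdio.h>

        struct node { struct node *next; };

        struct queue {
                struct node *head;
        };

        /* Returns true when the caller should raise the (simulated)
         * softirq, i.e. only on the empty -> non-empty transition. */
        static bool enqueue(struct queue *q, struct node *n)
        {
                bool was_empty = (q->head == NULL);

                n->next = q->head;      /* LIFO push for brevity */
                q->head = n;
                return was_empty;
        }

        int main(void)
        {
                struct queue q = { NULL };
                struct node a, b;

                if (enqueue(&q, &a))
                        puts("raise softirq (first entry)");
                if (enqueue(&q, &b))
                        puts("never printed: a raise is already pending");
                return 0;
        }

The same transition check is what lets the removed code call
raise_softirq_irqoff() at most once per batch of queued work.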