Diffstat (limited to 'kernel/softirq.c')
 -rw-r--r--	kernel/softirq.c | 117
 1 file changed, 30 insertions(+), 87 deletions(-)
diff --git a/kernel/softirq.c b/kernel/softirq.c
index b73e681df09e..cc96bdc0c2c9 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -23,6 +23,7 @@
 #include <linux/rcupdate.h>
 #include <linux/ftrace.h>
 #include <linux/smp.h>
+#include <linux/smpboot.h>
 #include <linux/tick.h>
 
 #define CREATE_TRACE_POINTS
@@ -220,7 +221,7 @@ asmlinkage void __do_softirq(void)
 	current->flags &= ~PF_MEMALLOC;
 
 	pending = local_softirq_pending();
-	account_system_vtime(current);
+	vtime_account(current);
 
 	__local_bh_disable((unsigned long)__builtin_return_address(0),
 				SOFTIRQ_OFFSET);
@@ -271,7 +272,7 @@ restart:
 
 	lockdep_softirq_exit();
 
-	account_system_vtime(current);
+	vtime_account(current);
 	__local_bh_enable(SOFTIRQ_OFFSET);
 	tsk_restore_flags(current, old_flags, PF_MEMALLOC);
 }
@@ -340,7 +341,7 @@ static inline void invoke_softirq(void)
  */
 void irq_exit(void)
 {
-	account_system_vtime(current);
+	vtime_account(current);
 	trace_hardirq_exit();
 	sub_preempt_count(IRQ_EXIT_OFFSET);
 	if (!in_interrupt() && local_softirq_pending())
@@ -742,49 +743,22 @@ void __init softirq_init(void)
 	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
 }
 
-static int run_ksoftirqd(void * __bind_cpu)
+static int ksoftirqd_should_run(unsigned int cpu)
 {
-	set_current_state(TASK_INTERRUPTIBLE);
-
-	while (!kthread_should_stop()) {
-		preempt_disable();
-		if (!local_softirq_pending()) {
-			schedule_preempt_disabled();
-		}
-
-		__set_current_state(TASK_RUNNING);
-
-		while (local_softirq_pending()) {
-			/* Preempt disable stops cpu going offline.
-			   If already offline, we'll be on wrong CPU:
-			   don't process */
-			if (cpu_is_offline((long)__bind_cpu))
-				goto wait_to_die;
-			local_irq_disable();
-			if (local_softirq_pending())
-				__do_softirq();
-			local_irq_enable();
-			sched_preempt_enable_no_resched();
-			cond_resched();
-			preempt_disable();
-			rcu_note_context_switch((long)__bind_cpu);
-		}
-		preempt_enable();
-		set_current_state(TASK_INTERRUPTIBLE);
-	}
-	__set_current_state(TASK_RUNNING);
-	return 0;
+	return local_softirq_pending();
+}
 
-wait_to_die:
-	preempt_enable();
-	/* Wait for kthread_stop */
-	set_current_state(TASK_INTERRUPTIBLE);
-	while (!kthread_should_stop()) {
-		schedule();
-		set_current_state(TASK_INTERRUPTIBLE);
+static void run_ksoftirqd(unsigned int cpu)
+{
+	local_irq_disable();
+	if (local_softirq_pending()) {
+		__do_softirq();
+		rcu_note_context_switch(cpu);
+		local_irq_enable();
+		cond_resched();
+		return;
 	}
-	__set_current_state(TASK_RUNNING);
-	return 0;
+	local_irq_enable();
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
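Note: the two new callbacks above plug into the generic smpboot thread infrastructure. As a reading aid, here is a simplified sketch of how the driver loop in kernel/smpboot.c of this era invokes them; this is an assumption-laden abbreviation, not the patch itself (park/unpark and stop handling are omitted, and struct smpboot_thread_data is cut down to the two fields the sketch needs):

#include <linux/kthread.h>
#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/smpboot.h>

struct smpboot_thread_data {		/* abbreviated for this sketch */
	unsigned int			cpu;
	struct smp_hotplug_thread	*ht;
};

/* Common loop driving every thread registered through
 * smpboot_register_percpu_thread(); heavily simplified. */
static int smpboot_thread_fn(void *data)
{
	struct smpboot_thread_data *td = data;
	struct smp_hotplug_thread *ht = td->ht;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		preempt_disable();
		if (!ht->thread_should_run(td->cpu)) {
			/* No work pending: sleep until woken */
			preempt_enable();
			schedule();
		} else {
			/* Work pending: run the client callback */
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			ht->thread_fn(td->cpu);
		}
	}
	return 0;
}

The thread_should_run() check runs with preemption disabled, which is why ksoftirqd_should_run() can simply read local_softirq_pending() for its own CPU.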
@@ -850,50 +824,14 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
 				  unsigned long action,
 				  void *hcpu)
 {
-	int hotcpu = (unsigned long)hcpu;
-	struct task_struct *p;
-
 	switch (action) {
-	case CPU_UP_PREPARE:
-	case CPU_UP_PREPARE_FROZEN:
-		p = kthread_create_on_node(run_ksoftirqd,
-					   hcpu,
-					   cpu_to_node(hotcpu),
-					   "ksoftirqd/%d", hotcpu);
-		if (IS_ERR(p)) {
-			printk("ksoftirqd for %i failed\n", hotcpu);
-			return notifier_from_errno(PTR_ERR(p));
-		}
-		kthread_bind(p, hotcpu);
-		per_cpu(ksoftirqd, hotcpu) = p;
-		break;
-	case CPU_ONLINE:
-	case CPU_ONLINE_FROZEN:
-		wake_up_process(per_cpu(ksoftirqd, hotcpu));
-		break;
 #ifdef CONFIG_HOTPLUG_CPU
-	case CPU_UP_CANCELED:
-	case CPU_UP_CANCELED_FROZEN:
-		if (!per_cpu(ksoftirqd, hotcpu))
-			break;
-		/* Unbind so it can run. Fall thru. */
-		kthread_bind(per_cpu(ksoftirqd, hotcpu),
-			     cpumask_any(cpu_online_mask));
 	case CPU_DEAD:
-	case CPU_DEAD_FROZEN: {
-		static const struct sched_param param = {
-			.sched_priority = MAX_RT_PRIO-1
-		};
-
-		p = per_cpu(ksoftirqd, hotcpu);
-		per_cpu(ksoftirqd, hotcpu) = NULL;
-		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
-		kthread_stop(p);
-		takeover_tasklets(hotcpu);
+	case CPU_DEAD_FROZEN:
+		takeover_tasklets((unsigned long)hcpu);
 		break;
-	}
 #endif /* CONFIG_HOTPLUG_CPU */
 	}
 	return NOTIFY_OK;
 }
 
@@ -901,14 +839,19 @@ static struct notifier_block __cpuinitdata cpu_nfb = {
 	.notifier_call = cpu_callback
 };
 
+static struct smp_hotplug_thread softirq_threads = {
+	.store			= &ksoftirqd,
+	.thread_should_run	= ksoftirqd_should_run,
+	.thread_fn		= run_ksoftirqd,
+	.thread_comm		= "ksoftirqd/%u",
+};
+
 static __init int spawn_ksoftirqd(void)
 {
-	void *cpu = (void *)(long)smp_processor_id();
-	int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
-
-	BUG_ON(err != NOTIFY_OK);
-	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
 	register_cpu_notifier(&cpu_nfb);
+
+	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
+
 	return 0;
 }
 early_initcall(spawn_ksoftirqd);
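For comparison, here is a minimal, hypothetical user of the same API; every foo_* name below is illustrative and not part of this patch. It shows the full conversion pattern in one place: a per-CPU task_struct pointer for .store, a cheap pending check, a work callback, and one registration call.

#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/smpboot.h>

/* Backing storage for the per-CPU threads (hypothetical example) */
static DEFINE_PER_CPU(struct task_struct *, foo_task);
static DEFINE_PER_CPU(unsigned long, foo_pending);

/* Return nonzero when this CPU has work queued; called with
 * preemption disabled from the smpboot loop. */
static int foo_should_run(unsigned int cpu)
{
	return this_cpu_read(foo_pending) != 0;
}

/* Called from the smpboot loop whenever foo_should_run() was true */
static void foo_thread_fn(unsigned int cpu)
{
	this_cpu_write(foo_pending, 0);
	pr_info("foo/%u: processed one batch\n", cpu);
}

static struct smp_hotplug_thread foo_threads = {
	.store			= &foo_task,
	.thread_should_run	= foo_should_run,
	.thread_fn		= foo_thread_fn,
	.thread_comm		= "foo/%u",
};

static int __init foo_init(void)
{
	/* Spawns a bound "foo/N" thread per online CPU and handles
	 * CPU hotplug for them automatically. */
	return smpboot_register_percpu_thread(&foo_threads);
}
early_initcall(foo_init);

smpboot_register_percpu_thread() takes over thread creation, CPU binding, and park/unpark across hotplug, which is exactly what lets this patch delete the CPU_UP_PREPARE/CPU_ONLINE/CPU_DEAD handling from cpu_callback() above.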
