 kernel/irq_work.c | 51 +++++++++++++++++++++++----------------------------
 1 file changed, 23 insertions(+), 28 deletions(-)
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index a82170e2fa78..126f254614bf 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -19,8 +19,8 @@
 #include <asm/processor.h>
 
 
-static DEFINE_PER_CPU(struct llist_head, irq_work_list);
-static DEFINE_PER_CPU(int, irq_work_raised);
+static DEFINE_PER_CPU(struct llist_head, raised_list);
+static DEFINE_PER_CPU(struct llist_head, lazy_list);
 
 /*
  * Claim the entry so that no one else will poke at it.
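The single irq_work_list plus the irq_work_raised flag become two per-CPU lockless lists: raised_list for work that must run from the self-IPI, and lazy_list for IRQ_WORK_LAZY work that can wait for the next tick. Both lean on the <linux/llist.h> primitive; a minimal sketch (not part of the patch) of the property the rest of the change relies on:

    /*
     * Sketch only: llist_add() returns true when the node went onto a
     * previously empty list, so the first enqueuer since the last flush
     * knows it is the one that must kick the IPI (or rely on the tick).
     */
    LLIST_HEAD(head);
    struct llist_node a, b;

    llist_add(&a, &head);    /* true: list was empty */
    llist_add(&b, &head);    /* false: work already pending */
    llist_del_all(&head);    /* detach everything for processing */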
@@ -70,15 +70,13 @@ bool irq_work_queue(struct irq_work *work)
 	/* Queue the entry and raise the IPI if needed. */
 	preempt_disable();
 
-	llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
-
-	/*
-	 * If the work is not "lazy" or the tick is stopped, raise the irq
-	 * work interrupt (if supported by the arch), otherwise, just wait
-	 * for the next tick.
-	 */
-	if (!(work->flags & IRQ_WORK_LAZY) || tick_nohz_tick_stopped()) {
-		if (!this_cpu_cmpxchg(irq_work_raised, 0, 1))
+	/* If the work is "lazy", handle it from next tick if any */
+	if (work->flags & IRQ_WORK_LAZY) {
+		if (llist_add(&work->llnode, &__get_cpu_var(lazy_list)) &&
+		    tick_nohz_tick_stopped())
+			arch_irq_work_raise();
+	} else {
+		if (llist_add(&work->llnode, &__get_cpu_var(raised_list)))
 			arch_irq_work_raise();
 	}
 
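With the lists split, the "do we need to raise the IPI?" decision falls straight out of llist_add()'s return value, so the irq_work_raised cmpxchg disappears: non-lazy work raises the IPI only when it is the first entry on raised_list, and lazy work raises it only when the tick is stopped and would otherwise never fire. A hypothetical user of the lazy path (illustrative only, loosely modeled on the printk klogd wakeup; all names below are made up) could look like:

    #include <linux/irq_work.h>

    static void my_wakeup_func(struct irq_work *work)
    {
            /* runs from the next tick, or from the IPI under nohz */
    }

    static struct irq_work my_wakeup_work = {
            .func   = my_wakeup_func,
            .flags  = IRQ_WORK_LAZY,
    };

    /* from NMI/hardirq context somewhere: */
    irq_work_queue(&my_wakeup_work);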
@@ -90,10 +88,11 @@ EXPORT_SYMBOL_GPL(irq_work_queue);
 
 bool irq_work_needs_cpu(void)
 {
-	struct llist_head *this_list;
+	struct llist_head *raised, *lazy;
 
-	this_list = &__get_cpu_var(irq_work_list);
-	if (llist_empty(this_list))
+	raised = &__get_cpu_var(raised_list);
+	lazy = &__get_cpu_var(lazy_list);
+	if (llist_empty(raised) && llist_empty(lazy))
 		return false;
 
 	/* All work should have been flushed before going offline */
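irq_work_needs_cpu() is what keeps the periodic tick alive while work is pending, so it now has to look at both lists before the CPU is allowed to go tickless. A purely hypothetical caller-side illustration (not the actual tick-sched.c code) of the decision it feeds:

    /* Hypothetical sketch of the nohz decision irq_work_needs_cpu() feeds: */
    static bool can_stop_tick_example(void)
    {
            if (irq_work_needs_cpu())
                    return false;   /* raised_list or lazy_list non-empty */
            return true;
    }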
@@ -102,28 +101,18 @@ bool irq_work_needs_cpu(void)
 	return true;
 }
 
-static void __irq_work_run(void)
+static void irq_work_run_list(struct llist_head *list)
 {
 	unsigned long flags;
 	struct irq_work *work;
-	struct llist_head *this_list;
 	struct llist_node *llnode;
 
+	BUG_ON(!irqs_disabled());
 
-	/*
-	 * Reset the "raised" state right before we check the list because
-	 * an NMI may enqueue after we find the list empty from the runner.
-	 */
-	__this_cpu_write(irq_work_raised, 0);
-	barrier();
-
-	this_list = &__get_cpu_var(irq_work_list);
-	if (llist_empty(this_list))
+	if (llist_empty(list))
 		return;
 
-	BUG_ON(!irqs_disabled());
-
-	llnode = llist_del_all(this_list);
+	llnode = llist_del_all(list);
 	while (llnode != NULL) {
 		work = llist_entry(llnode, struct irq_work, llnode);
 
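The removed "reset irq_work_raised before checking the list" comment was there to close a race with NMIs enqueueing behind the runner's back. With per-list state that race closes itself, because the enqueuer raises the IPI whenever llist_add() reports the list was empty. A sketch of the interleaving (reasoning only, not code from the patch):

    /*
     * runner (hardirq)                      NMI on the same CPU
     * ----------------                      -------------------
     * llnode = llist_del_all(list);
     *                                       llist_add(&w->llnode, list)
     *                                         -> returns true (list empty)
     *                                         -> arch_irq_work_raise()
     * ...runs the detached entries...
     *
     * The entry queued by the NMI is picked up by the next IPI/tick,
     * so no separate "raised" flag or barrier() is needed.
     */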
@@ -148,6 +137,12 @@ static void __irq_work_run(void)
 	}
 }
 
+static void __irq_work_run(void)
+{
+	irq_work_run_list(&__get_cpu_var(raised_list));
+	irq_work_run_list(&__get_cpu_var(lazy_list));
+}
+
 /*
  * Run the irq_work entries on this cpu. Requires to be ran from hardirq
  * context with local IRQs disabled.
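__irq_work_run() is now just "drain both lists", with raised_list handled first. Its callers are untouched by this patch; for context, the hardirq entry point above the comment is roughly the following (paraphrased from the surrounding file, not part of this diff):

    void irq_work_run(void)
    {
            BUG_ON(!in_irq());
            __irq_work_run();
    }
    EXPORT_SYMBOL_GPL(irq_work_run);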