 include/linux/irq_work.h | 14 ++++++++++++++
 kernel/irq_work.c        | 47 +++++++++++++++++++++++++++--------------------
 2 files changed, 41 insertions(+), 20 deletions(-)
diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
index a69704f37204..b28eb60c8bf6 100644
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -3,6 +3,20 @@
 
 #include <linux/llist.h>
 
+/*
+ * An entry can be in one of four states:
+ *
+ * free      NULL, 0 -> {claimed}       : free to be used
+ * claimed   NULL, 3 -> {pending}       : claimed to be enqueued
+ * pending   next, 3 -> {busy}          : queued, pending callback
+ * busy      NULL, 2 -> {free, claimed} : callback in progress, can be claimed
+ */
+
+#define IRQ_WORK_PENDING	1UL
+#define IRQ_WORK_BUSY		2UL
+#define IRQ_WORK_FLAGS		3UL
+#define IRQ_WORK_LAZY		4UL /* Doesn't want IPI, wait for tick */
+
 struct irq_work {
 	unsigned long flags;
 	struct llist_node llnode;
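Read as (llnode, flags) pairs, the table packs the state machine into the low two flag bits (PENDING and BUSY); the new IRQ_WORK_LAZY bit rides alongside without taking part in the state transitions. Below is a minimal user-space sketch of the claim transition the table implies, using C11 atomics in place of the kernel's cmpxchg(); the demo_* names are hypothetical and this is not the kernel's irq_work_claim():

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define IRQ_WORK_PENDING	1UL
#define IRQ_WORK_BUSY		2UL
#define IRQ_WORK_FLAGS		3UL

/* Hypothetical stand-in for struct irq_work, flags word only. */
struct demo_work {
	_Atomic unsigned long flags;
};

/*
 * Move a work item to the "claimed" state (flags == 3). Per the state
 * table this succeeds from "free" (0) and "busy" (2), but must fail
 * while PENDING is set, because the item is still queued.
 */
static bool demo_claim(struct demo_work *work)
{
	unsigned long oflags = atomic_load(&work->flags);

	for (;;) {
		if (oflags & IRQ_WORK_PENDING)
			return false;		/* pending or claimed */
		if (atomic_compare_exchange_weak(&work->flags, &oflags,
						 oflags | IRQ_WORK_FLAGS))
			return true;
		/* oflags was reloaded by the failed CAS; re-check. */
	}
}

int main(void)
{
	struct demo_work w = { .flags = 0 };	/* free */

	printf("claim from free: %d\n", demo_claim(&w));	/* 1 */
	printf("claim again:     %d\n", demo_claim(&w));	/* 0 */
	return 0;
}

Claiming succeeds from free (0) and busy (2) but fails while PENDING is set, which is exactly the set of arrows the table draws into {claimed}.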
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 480f74715ba9..7f3a59bc8e3d 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -12,24 +12,15 @@
 #include <linux/percpu.h>
 #include <linux/hardirq.h>
 #include <linux/irqflags.h>
+#include <linux/sched.h>
+#include <linux/tick.h>
 #include <linux/cpu.h>
 #include <linux/notifier.h>
 #include <asm/processor.h>
 
-/*
- * An entry can be in one of four states:
- *
- * free      NULL, 0 -> {claimed}       : free to be used
- * claimed   NULL, 3 -> {pending}       : claimed to be enqueued
- * pending   next, 3 -> {busy}          : queued, pending callback
- * busy      NULL, 2 -> {free, claimed} : callback in progress, can be claimed
- */
-
-#define IRQ_WORK_PENDING	1UL
-#define IRQ_WORK_BUSY		2UL
-#define IRQ_WORK_FLAGS		3UL
-
 
 static DEFINE_PER_CPU(struct llist_head, irq_work_list);
+static DEFINE_PER_CPU(int, irq_work_raised);
 
 /*
  * Claim the entry so that no one else will poke at it.
@@ -69,14 +60,19 @@ void __weak arch_irq_work_raise(void)
  */
 static void __irq_work_queue(struct irq_work *work)
 {
-	bool empty;
-
 	preempt_disable();
 
-	empty = llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
-	/* The list was empty, raise self-interrupt to start processing. */
-	if (empty)
-		arch_irq_work_raise();
+	llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
+
+	/*
+	 * If the work is not "lazy" or the tick is stopped, raise the irq
+	 * work interrupt (if supported by the arch), otherwise, just wait
+	 * for the next tick.
+	 */
+	if (!(work->flags & IRQ_WORK_LAZY) || tick_nohz_tick_stopped()) {
+		if (!this_cpu_cmpxchg(irq_work_raised, 0, 1))
+			arch_irq_work_raise();
+	}
 
 	preempt_enable();
 }
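With the per-CPU irq_work_raised flag, at most one self-interrupt is raised per run of the work list: the first enqueue after the flag was cleared wins the cmpxchg and calls arch_irq_work_raise(), while later enqueues see the flag already set and skip it. Note that this_cpu_cmpxchg() returns the previous value, hence the `!` test. A user-space model of that decision follows, single-CPU and hypothetical throughout (demo_* names are invented; the C11 strong CAS reports success as a bool rather than returning the old value):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical single-CPU model of the per-CPU irq_work_raised flag. */
static atomic_int demo_raised;
static int demo_ipis;

static void demo_arch_irq_work_raise(void)
{
	demo_ipis++;		/* stands in for the self-IPI */
}

/* The queueing decision: at most one IPI per "raised" window. */
static void demo_queue(bool lazy, bool tick_stopped)
{
	int expected = 0;

	if (!lazy || tick_stopped) {
		if (atomic_compare_exchange_strong(&demo_raised, &expected, 1))
			demo_arch_irq_work_raise();
	}
	/* else: a lazy work just waits for the next tick */
}

int main(void)
{
	demo_queue(false, false);	/* first non-lazy enqueue: IPI */
	demo_queue(false, false);	/* flag already set: no IPI */
	demo_queue(true, false);	/* lazy, tick running: no IPI */
	demo_queue(true, true);		/* lazy, tick stopped, flag set: no IPI */
	printf("IPIs raised: %d\n", demo_ipis);		/* 1 */
	return 0;
}

The flag is re-armed by the runner (next hunk), so each burst of enqueues costs one interrupt instead of one per work item.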
@@ -117,10 +113,19 @@ bool irq_work_needs_cpu(void)
 
 static void __irq_work_run(void)
 {
+	unsigned long flags;
 	struct irq_work *work;
 	struct llist_head *this_list;
 	struct llist_node *llnode;
 
+
+	/*
+	 * Reset the "raised" state right before we check the list because
+	 * an NMI may enqueue after we find the list empty from the runner.
+	 */
+	__this_cpu_write(irq_work_raised, 0);
+	barrier();
+
 	this_list = &__get_cpu_var(irq_work_list);
 	if (llist_empty(this_list))
 		return;
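The ordering here is the subtle part: the runner must clear irq_work_raised before it looks at the list. If it cleared the flag after draining, an NMI that enqueued in between would find the flag still set, skip the self-IPI, and its work would sit on the list with no interrupt due; barrier() keeps the compiler from reordering the reset past the check. A deterministic user-space replay of both orderings, with plain counters standing in for the per-CPU llist and the self-IPI (all names hypothetical):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int raised;	/* models irq_work_raised */
static int list_len;		/* models the per-CPU llist */
static int ipis_due;		/* self-IPIs raised but not yet run */

static void nmi_enqueue(void)
{
	int expected = 0;

	list_len++;				/* llist_add() */
	if (atomic_compare_exchange_strong(&raised, &expected, 1))
		ipis_due++;			/* arch_irq_work_raise() */
}

/* The runner, replayed with the NMI injected at the worst moment. */
static void runner(bool reset_first)
{
	if (reset_first)
		atomic_store(&raised, 0);	/* the patch's ordering */

	list_len = 0;				/* drain + run everything */

	nmi_enqueue();				/* NMI lands after the drain */

	if (!reset_first)
		atomic_store(&raised, 0);	/* hypothetical late reset */
}

int main(void)
{
	for (int order = 0; order < 2; order++) {
		bool reset_first = (order == 0);

		atomic_store(&raised, 1);	/* the IPI that ran us */
		list_len = 1;
		ipis_due = 0;

		runner(reset_first);

		printf("%s: queued=%d ipis_due=%d -> %s\n",
		       reset_first ? "reset first" : "reset last ",
		       list_len, ipis_due,
		       (list_len && !ipis_due) ? "stalled" : "ok");
	}
	return 0;
}

Running it prints "ok" for the patch's ordering and "stalled" for the late reset: the late-reset NMI work is queued with no interrupt pending.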
@@ -140,13 +145,15 @@ static void __irq_work_run(void)
 		 * to claim that work don't rely on us to handle their data
 		 * while we are in the middle of the func.
 		 */
-		xchg(&work->flags, IRQ_WORK_BUSY);
+		flags = work->flags & ~IRQ_WORK_PENDING;
+		xchg(&work->flags, flags);
+
 		work->func(work);
 		/*
 		 * Clear the BUSY bit and return to the free state if
 		 * no-one else claimed it meanwhile.
 		 */
-		(void)cmpxchg(&work->flags, IRQ_WORK_BUSY, 0);
+		(void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
 	}
 }
 
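This last hunk is what lets the LAZY bit survive execution: the old code xchg'd the constant IRQ_WORK_BUSY over the flags and cmpxchg'd back to 0, which would have wiped any extra bits. The new form clears only PENDING while the callback runs (so the work can be claimed and re-queued from func() itself), then drops only BUSY if nobody re-claimed it. A user-space replay with C11 atomics standing in for the kernel's xchg()/cmpxchg() (hypothetical, illustration only):

#include <stdatomic.h>
#include <stdio.h>

#define IRQ_WORK_PENDING	1UL
#define IRQ_WORK_BUSY		2UL
#define IRQ_WORK_LAZY		4UL

int main(void)
{
	/* A queued lazy work: the "pending" state plus the LAZY bit. */
	_Atomic unsigned long wflags =
		IRQ_WORK_PENDING | IRQ_WORK_BUSY | IRQ_WORK_LAZY;
	unsigned long flags, expected;

	/* Drop PENDING but keep BUSY (and LAZY) across the callback. */
	flags = atomic_load(&wflags) & ~IRQ_WORK_PENDING;
	atomic_exchange(&wflags, flags);
	printf("during func(): %#lx\n", atomic_load(&wflags));	/* 0x6 */

	/* func() would run here; the work may be claimed again now. */

	/* Drop BUSY only if nobody re-claimed the work meanwhile. */
	expected = flags;
	if (atomic_compare_exchange_strong(&wflags, &expected,
					   flags & ~IRQ_WORK_BUSY))
		printf("freed: %#lx\n", atomic_load(&wflags));	/* 0x4 */
	return 0;
}

With LAZY intact after the callback, a re-queued lazy work keeps deferring to the tick instead of degrading to an IPI.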