Diffstat (limited to 'kernel/irq_work.c')
-rw-r--r--  kernel/irq_work.c  |  95
1 file changed, 36 insertions(+), 59 deletions(-)
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index c58fa7da8aef..c3c46c72046e 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -6,9 +6,11 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/irq_work.h>
+#include <linux/percpu.h>
 #include <linux/hardirq.h>
+#include <asm/processor.h>
 
 /*
  * An entry can be in one of four states:
@@ -17,54 +19,34 @@
  * claimed   NULL, 3 -> {pending}       : claimed to be enqueued
  * pending   next, 3 -> {busy}          : queued, pending callback
  * busy      NULL, 2 -> {free, claimed} : callback in progress, can be claimed
- *
- * We use the lower two bits of the next pointer to keep PENDING and BUSY
- * flags.
  */
 
 #define IRQ_WORK_PENDING	1UL
 #define IRQ_WORK_BUSY		2UL
 #define IRQ_WORK_FLAGS		3UL
 
-static inline bool irq_work_is_set(struct irq_work *entry, int flags)
-{
-	return (unsigned long)entry->next & flags;
-}
-
-static inline struct irq_work *irq_work_next(struct irq_work *entry)
-{
-	unsigned long next = (unsigned long)entry->next;
-	next &= ~IRQ_WORK_FLAGS;
-	return (struct irq_work *)next;
-}
-
-static inline struct irq_work *next_flags(struct irq_work *entry, int flags)
-{
-	unsigned long next = (unsigned long)entry;
-	next |= flags;
-	return (struct irq_work *)next;
-}
-
-static DEFINE_PER_CPU(struct irq_work *, irq_work_list);
+static DEFINE_PER_CPU(struct llist_head, irq_work_list);
 
 /*
  * Claim the entry so that no one else will poke at it.
  */
-static bool irq_work_claim(struct irq_work *entry)
+static bool irq_work_claim(struct irq_work *work)
 {
-	struct irq_work *next, *nflags;
+	unsigned long flags, nflags;
 
-	do {
-		next = entry->next;
-		if ((unsigned long)next & IRQ_WORK_PENDING)
+	for (;;) {
+		flags = work->flags;
+		if (flags & IRQ_WORK_PENDING)
 			return false;
-		nflags = next_flags(next, IRQ_WORK_FLAGS);
-	} while (cmpxchg(&entry->next, next, nflags) != next);
+		nflags = flags | IRQ_WORK_FLAGS;
+		if (cmpxchg(&work->flags, flags, nflags) == flags)
+			break;
+		cpu_relax();
+	}
 
 	return true;
 }
 
-
 void __weak arch_irq_work_raise(void)
 {
 	/*
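The rewritten claim path above no longer encodes state in the low bits of the next pointer; it simply retries a cmpxchg() on the dedicated work->flags word until both PENDING and BUSY are set. A minimal user-space sketch of the same loop, with invented names and C11 atomics standing in for the kernel's cmpxchg() and cpu_relax():

#include <stdatomic.h>
#include <stdbool.h>

#define WORK_PENDING	1UL
#define WORK_BUSY	2UL
#define WORK_FLAGS	3UL

struct fake_work {
	_Atomic unsigned long flags;
};

static bool fake_claim(struct fake_work *work)
{
	unsigned long flags, nflags;

	for (;;) {
		flags = atomic_load(&work->flags);
		if (flags & WORK_PENDING)
			return false;	/* already queued; nothing to do */
		nflags = flags | WORK_FLAGS;
		/* On success both PENDING and BUSY are set and we own the entry. */
		if (atomic_compare_exchange_weak(&work->flags, &flags, nflags))
			return true;
	}
}
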
@@ -75,20 +57,15 @@ void __weak arch_irq_work_raise(void)
 /*
  * Queue the entry and raise the IPI if needed.
  */
-static void __irq_work_queue(struct irq_work *entry)
+static void __irq_work_queue(struct irq_work *work)
 {
-	struct irq_work *next;
+	bool empty;
 
 	preempt_disable();
 
-	do {
-		next = __this_cpu_read(irq_work_list);
-		/* Can assign non-atomic because we keep the flags set. */
-		entry->next = next_flags(next, IRQ_WORK_FLAGS);
-	} while (this_cpu_cmpxchg(irq_work_list, next, entry) != next);
-
+	empty = llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
 	/* The list was empty, raise self-interrupt to start processing. */
-	if (!irq_work_next(entry))
+	if (empty)
 		arch_irq_work_raise();
 
 	preempt_enable();
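__irq_work_queue() now relies on llist_add(), which pushes the node onto the per-CPU list with a single compare-and-swap and reports whether the list was empty beforehand, so only the first enqueuer needs to raise the self-interrupt. A rough user-space sketch of that push primitive (invented names and C11 atomics, not the kernel's llist implementation):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct lnode {
	struct lnode *next;
};

struct lhead {
	_Atomic(struct lnode *) first;
};

/* Push n onto h; return true if the list was empty before the push. */
static bool llist_push(struct lnode *n, struct lhead *h)
{
	struct lnode *first = atomic_load(&h->first);

	do {
		n->next = first;	/* on CAS failure, first is refreshed */
	} while (!atomic_compare_exchange_weak(&h->first, &first, n));

	return first == NULL;
}
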
@@ -100,16 +77,16 @@ static void __irq_work_queue(struct irq_work *entry)
  *
  * Can be re-enqueued while the callback is still in progress.
  */
-bool irq_work_queue(struct irq_work *entry)
+bool irq_work_queue(struct irq_work *work)
 {
-	if (!irq_work_claim(entry)) {
+	if (!irq_work_claim(work)) {
 		/*
 		 * Already enqueued, can't do!
 		 */
 		return false;
 	}
 
-	__irq_work_queue(entry);
+	__irq_work_queue(work);
 	return true;
 }
 EXPORT_SYMBOL_GPL(irq_work_queue);
@@ -120,34 +97,34 @@ EXPORT_SYMBOL_GPL(irq_work_queue);
  */
 void irq_work_run(void)
 {
-	struct irq_work *list;
+	struct irq_work *work;
+	struct llist_head *this_list;
+	struct llist_node *llnode;
 
-	if (this_cpu_read(irq_work_list) == NULL)
+	this_list = &__get_cpu_var(irq_work_list);
+	if (llist_empty(this_list))
 		return;
 
 	BUG_ON(!in_irq());
 	BUG_ON(!irqs_disabled());
 
-	list = this_cpu_xchg(irq_work_list, NULL);
-
-	while (list != NULL) {
-		struct irq_work *entry = list;
+	llnode = llist_del_all(this_list);
+	while (llnode != NULL) {
+		work = llist_entry(llnode, struct irq_work, llnode);
 
-		list = irq_work_next(list);
+		llnode = llist_next(llnode);
 
 		/*
-		 * Clear the PENDING bit, after this point the @entry
+		 * Clear the PENDING bit, after this point the @work
 		 * can be re-used.
 		 */
-		entry->next = next_flags(NULL, IRQ_WORK_BUSY);
-		entry->func(entry);
+		work->flags = IRQ_WORK_BUSY;
+		work->func(work);
 		/*
 		 * Clear the BUSY bit and return to the free state if
 		 * no-one else claimed it meanwhile.
 		 */
-		(void)cmpxchg(&entry->next,
-			      next_flags(NULL, IRQ_WORK_BUSY),
-			      NULL);
+		(void)cmpxchg(&work->flags, IRQ_WORK_BUSY, 0);
 	}
 }
 EXPORT_SYMBOL_GPL(irq_work_run);
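irq_work_run() drains the queue with llist_del_all(): a single atomic exchange detaches every queued entry at once, after which the walk needs no locking because no other CPU can reach the detached nodes. A self-contained user-space sketch of that drain pattern (invented types and names; note an llist hands entries back in LIFO order):

#include <stdatomic.h>
#include <stddef.h>

struct lnode {
	struct lnode *next;
	void (*func)(struct lnode *);
};

struct lhead {
	_Atomic(struct lnode *) first;
};

static void drain_all(struct lhead *h)
{
	struct lnode *n = atomic_exchange(&h->first, NULL);

	while (n != NULL) {
		struct lnode *next = n->next;	/* read before func() may reuse n */

		n->func(n);
		n = next;
	}
}
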
@@ -156,11 +133,11 @@ EXPORT_SYMBOL_GPL(irq_work_run);
  * Synchronize against the irq_work @entry, ensures the entry is not
  * currently in use.
  */
-void irq_work_sync(struct irq_work *entry)
+void irq_work_sync(struct irq_work *work)
 {
 	WARN_ON_ONCE(irqs_disabled());
 
-	while (irq_work_is_set(entry, IRQ_WORK_BUSY))
+	while (work->flags & IRQ_WORK_BUSY)
 		cpu_relax();
 }
 EXPORT_SYMBOL_GPL(irq_work_sync);
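For context, a hypothetical caller of this API could look like the sketch below: zero-initialized flags/llnode fields leave the entry in the free state described at the top of the file, irq_work_queue() is safe to call from NMI or hard-IRQ context, and irq_work_sync() waits for a callback in flight before the entry is torn down. The names my_work, my_work_func, my_nmi_handler and my_teardown are invented for illustration:

#include <linux/kernel.h>
#include <linux/irq_work.h>

static void my_work_func(struct irq_work *work)
{
	/* Runs from the arch_irq_work_raise() self-interrupt with IRQs disabled. */
	pr_info("deferred work ran\n");
}

static struct irq_work my_work = {
	.func	= my_work_func,		/* flags and llnode stay 0: the free state */
};

/* From NMI or hard-IRQ context, where locks and sleeping are off-limits. */
void my_nmi_handler(void)
{
	irq_work_queue(&my_work);	/* returns false if already pending */
}

/* Before freeing or unloading: wait until no callback is still running. */
void my_teardown(void)
{
	irq_work_sync(&my_work);
}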