path: root/kernel
author    Huang Ying <ying.huang@intel.com>    2011-09-08 02:00:46 -0400
committer Ingo Molnar <mingo@elte.hu>          2011-10-04 06:43:49 -0400
commit    38aaf8090d34b623b7919d8c933f6e938c9bf44b
tree      4d036cfedd9e7669b6491900e407f59adbc93c69 /kernel
parent    781f7fd916fc77a862e20063ed3aeedf173234f9
irq_work: Use llist in the struct irq_work logic
Use llist in irq_work instead of the lock-less linked list implementation
in irq_work to avoid the code duplication.

Signed-off-by: Huang Ying <ying.huang@intel.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1315461646-1379-6-git-send-email-ying.huang@intel.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
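For reference, the pattern the patch moves to is the generic lock-less list from <linux/llist.h>: llist_add() pushes a node and returns true when the list was empty beforehand (the cue to raise the self-interrupt exactly once), and llist_del_all() atomically detaches the whole per-CPU list so it can be walked without further synchronization. A minimal sketch of that producer/consumer shape follows; the names example_list, example_queue and example_run are illustrative only and not part of the patch.

    /* Sketch only: the llist producer/consumer pattern adopted by the patch. */
    static DEFINE_PER_CPU(struct llist_head, example_list);

    static void example_queue(struct irq_work *work)
    {
            /* llist_add() returns true if the list was empty before the add,
             * so the self-interrupt is raised only for the first entry. */
            if (llist_add(&work->llnode, &__get_cpu_var(example_list)))
                    arch_irq_work_raise();
    }

    static void example_run(void)
    {
            struct llist_node *llnode;

            /* Atomically take the whole per-CPU list for local processing. */
            llnode = llist_del_all(&__get_cpu_var(example_list));
            while (llnode) {
                    struct irq_work *work =
                            llist_entry(llnode, struct irq_work, llnode);

                    llnode = llnode->next;
                    work->func(work);
            }
    }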
Diffstat (limited to 'kernel')
-rw-r--r--    kernel/irq_work.c    91
1 file changed, 33 insertions(+), 58 deletions(-)
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index c58fa7da8aef..6f0a4310defd 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -17,54 +17,34 @@
  * claimed   NULL, 3    -> {pending}       : claimed to be enqueued
  * pending   next, 3    -> {busy}          : queued, pending callback
  * busy      NULL, 2    -> {free, claimed} : callback in progress, can be claimed
- *
- * We use the lower two bits of the next pointer to keep PENDING and BUSY
- * flags.
  */
 
 #define IRQ_WORK_PENDING	1UL
 #define IRQ_WORK_BUSY		2UL
 #define IRQ_WORK_FLAGS		3UL
 
-static inline bool irq_work_is_set(struct irq_work *entry, int flags)
-{
-	return (unsigned long)entry->next & flags;
-}
-
-static inline struct irq_work *irq_work_next(struct irq_work *entry)
-{
-	unsigned long next = (unsigned long)entry->next;
-	next &= ~IRQ_WORK_FLAGS;
-	return (struct irq_work *)next;
-}
-
-static inline struct irq_work *next_flags(struct irq_work *entry, int flags)
-{
-	unsigned long next = (unsigned long)entry;
-	next |= flags;
-	return (struct irq_work *)next;
-}
-
-static DEFINE_PER_CPU(struct irq_work *, irq_work_list);
+static DEFINE_PER_CPU(struct llist_head, irq_work_list);
 
 /*
  * Claim the entry so that no one else will poke at it.
  */
-static bool irq_work_claim(struct irq_work *entry)
+static bool irq_work_claim(struct irq_work *work)
 {
-	struct irq_work *next, *nflags;
+	unsigned long flags, nflags;
 
-	do {
-		next = entry->next;
-		if ((unsigned long)next & IRQ_WORK_PENDING)
+	for (;;) {
+		flags = work->flags;
+		if (flags & IRQ_WORK_PENDING)
 			return false;
-		nflags = next_flags(next, IRQ_WORK_FLAGS);
-	} while (cmpxchg(&entry->next, next, nflags) != next);
+		nflags = flags | IRQ_WORK_FLAGS;
+		if (cmpxchg(&work->flags, flags, nflags) == flags)
+			break;
+		cpu_relax();
+	}
 
 	return true;
 }
 
-
 void __weak arch_irq_work_raise(void)
 {
 	/*
@@ -75,20 +55,15 @@ void __weak arch_irq_work_raise(void)
 /*
  * Queue the entry and raise the IPI if needed.
  */
-static void __irq_work_queue(struct irq_work *entry)
+static void __irq_work_queue(struct irq_work *work)
 {
-	struct irq_work *next;
+	bool empty;
 
 	preempt_disable();
 
-	do {
-		next = __this_cpu_read(irq_work_list);
-		/* Can assign non-atomic because we keep the flags set. */
-		entry->next = next_flags(next, IRQ_WORK_FLAGS);
-	} while (this_cpu_cmpxchg(irq_work_list, next, entry) != next);
-
+	empty = llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
 	/* The list was empty, raise self-interrupt to start processing. */
-	if (!irq_work_next(entry))
+	if (empty)
 		arch_irq_work_raise();
 
 	preempt_enable();
@@ -100,16 +75,16 @@ static void __irq_work_queue(struct irq_work *entry)
  *
  * Can be re-enqueued while the callback is still in progress.
  */
-bool irq_work_queue(struct irq_work *entry)
+bool irq_work_queue(struct irq_work *work)
 {
-	if (!irq_work_claim(entry)) {
+	if (!irq_work_claim(work)) {
 		/*
 		 * Already enqueued, can't do!
 		 */
 		return false;
 	}
 
-	__irq_work_queue(entry);
+	__irq_work_queue(work);
 	return true;
 }
 EXPORT_SYMBOL_GPL(irq_work_queue);
@@ -120,34 +95,34 @@ EXPORT_SYMBOL_GPL(irq_work_queue);
  */
 void irq_work_run(void)
 {
-	struct irq_work *list;
+	struct irq_work *work;
+	struct llist_head *this_list;
+	struct llist_node *llnode;
 
-	if (this_cpu_read(irq_work_list) == NULL)
+	this_list = &__get_cpu_var(irq_work_list);
+	if (llist_empty(this_list))
 		return;
 
 	BUG_ON(!in_irq());
 	BUG_ON(!irqs_disabled());
 
-	list = this_cpu_xchg(irq_work_list, NULL);
-
-	while (list != NULL) {
-		struct irq_work *entry = list;
+	llnode = llist_del_all(this_list);
+	while (llnode != NULL) {
+		work = llist_entry(llnode, struct irq_work, llnode);
 
-		list = irq_work_next(list);
+		llnode = llnode->next;
 
 		/*
-		 * Clear the PENDING bit, after this point the @entry
+		 * Clear the PENDING bit, after this point the @work
 		 * can be re-used.
 		 */
-		entry->next = next_flags(NULL, IRQ_WORK_BUSY);
-		entry->func(entry);
+		work->flags = IRQ_WORK_BUSY;
+		work->func(work);
 		/*
 		 * Clear the BUSY bit and return to the free state if
 		 * no-one else claimed it meanwhile.
 		 */
-		(void)cmpxchg(&entry->next,
-			      next_flags(NULL, IRQ_WORK_BUSY),
-			      NULL);
+		(void)cmpxchg(&work->flags, IRQ_WORK_BUSY, 0);
 	}
 }
 EXPORT_SYMBOL_GPL(irq_work_run);
@@ -156,11 +131,11 @@ EXPORT_SYMBOL_GPL(irq_work_run);
  * Synchronize against the irq_work @entry, ensures the entry is not
  * currently in use.
  */
-void irq_work_sync(struct irq_work *entry)
+void irq_work_sync(struct irq_work *work)
 {
 	WARN_ON_ONCE(irqs_disabled());
 
-	while (irq_work_is_set(entry, IRQ_WORK_BUSY))
+	while (work->flags & IRQ_WORK_BUSY)
 		cpu_relax();
 }
 EXPORT_SYMBOL_GPL(irq_work_sync);
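As a usage illustration only (not part of this patch): a caller that needs to defer work out of NMI or other restricted context initializes an irq_work with its callback and queues it with irq_work_queue(); the callback later runs from irq_work_run() with interrupts disabled, triggered by the arch_irq_work_raise() self-interrupt. The names my_work and my_func below are made up for the example.

    #include <linux/irq_work.h>

    static void my_func(struct irq_work *work)
    {
            /* Runs from irq_work_run(), i.e. in hard-irq context with IRQs off. */
            pr_info("irq_work callback ran\n");
    }

    static struct irq_work my_work;

    static int __init my_setup(void)
    {
            init_irq_work(&my_work, my_func);
            return 0;
    }

    /* Later, e.g. from NMI context:
     *   irq_work_queue(&my_work);
     * returns false if @my_work is already pending; per the comment above,
     * it may be queued again while its callback is still in progress. */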