Diffstat (limited to 'kernel/async.c')
-rw-r--r--  kernel/async.c  142
1 file changed, 23 insertions(+), 119 deletions(-)
diff --git a/kernel/async.c b/kernel/async.c
index 27235f5de198..cd9dbb913c77 100644
--- a/kernel/async.c
+++ b/kernel/async.c
@@ -49,39 +49,33 @@ asynchronous and synchronous parts of the kernel.
 */

 #include <linux/async.h>
-#include <linux/bug.h>
 #include <linux/module.h>
 #include <linux/wait.h>
 #include <linux/sched.h>
-#include <linux/init.h>
-#include <linux/kthread.h>
-#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
 #include <asm/atomic.h>

 static async_cookie_t next_cookie = 1;

-#define MAX_THREADS     256
 #define MAX_WORK        32768

 static LIST_HEAD(async_pending);
 static LIST_HEAD(async_running);
 static DEFINE_SPINLOCK(async_lock);

-static int async_enabled = 0;
-
 struct async_entry {
-        struct list_head list;
-        async_cookie_t cookie;
-        async_func_ptr *func;
-        void *data;
-        struct list_head *running;
+        struct list_head        list;
+        struct work_struct      work;
+        async_cookie_t          cookie;
+        async_func_ptr          *func;
+        void                    *data;
+        struct list_head        *running;
 };

 static DECLARE_WAIT_QUEUE_HEAD(async_done);
-static DECLARE_WAIT_QUEUE_HEAD(async_new);

 static atomic_t entry_count;
-static atomic_t thread_count;

 extern int initcall_debug;

@@ -116,27 +110,23 @@ static async_cookie_t lowest_in_progress(struct list_head *running)
         spin_unlock_irqrestore(&async_lock, flags);
         return ret;
 }
+
 /*
  * pick the first pending entry and run it
  */
-static void run_one_entry(void)
+static void async_run_entry_fn(struct work_struct *work)
 {
+        struct async_entry *entry =
+                container_of(work, struct async_entry, work);
         unsigned long flags;
-        struct async_entry *entry;
         ktime_t calltime, delta, rettime;

-        /* 1) pick one task from the pending queue */
-
+        /* 1) move self to the running queue */
         spin_lock_irqsave(&async_lock, flags);
-        if (list_empty(&async_pending))
-                goto out;
-        entry = list_first_entry(&async_pending, struct async_entry, list);
-
-        /* 2) move it to the running queue */
         list_move_tail(&entry->list, entry->running);
         spin_unlock_irqrestore(&async_lock, flags);

-        /* 3) run it (and print duration)*/
+        /* 2) run (and print duration) */
         if (initcall_debug && system_state == SYSTEM_BOOTING) {
                 printk("calling %lli_%pF @ %i\n", (long long)entry->cookie,
                         entry->func, task_pid_nr(current));
@@ -152,31 +142,25 @@ static void run_one_entry(void)
                         (long long)ktime_to_ns(delta) >> 10);
         }

-        /* 4) remove it from the running queue */
+        /* 3) remove self from the running queue */
         spin_lock_irqsave(&async_lock, flags);
         list_del(&entry->list);

-        /* 5) free the entry */
+        /* 4) free the entry */
         kfree(entry);
         atomic_dec(&entry_count);

         spin_unlock_irqrestore(&async_lock, flags);

-        /* 6) wake up any waiters. */
+        /* 5) wake up any waiters */
         wake_up(&async_done);
-        return;
-
-out:
-        spin_unlock_irqrestore(&async_lock, flags);
 }

-
 static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct list_head *running)
 {
         struct async_entry *entry;
         unsigned long flags;
         async_cookie_t newcookie;
-

         /* allow irq-off callers */
         entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);
@@ -185,7 +169,7 @@ static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct l
          * If we're out of memory or if there's too much work
          * pending already, we execute synchronously.
          */
-        if (!async_enabled || !entry || atomic_read(&entry_count) > MAX_WORK) {
+        if (!entry || atomic_read(&entry_count) > MAX_WORK) {
                 kfree(entry);
                 spin_lock_irqsave(&async_lock, flags);
                 newcookie = next_cookie++;
@@ -195,6 +179,7 @@ static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct l
                 ptr(data, newcookie);
                 return newcookie;
         }
+        INIT_WORK(&entry->work, async_run_entry_fn);
         entry->func = ptr;
         entry->data = data;
         entry->running = running;
@@ -204,7 +189,10 @@ static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct l
         list_add_tail(&entry->list, &async_pending);
         atomic_inc(&entry_count);
         spin_unlock_irqrestore(&async_lock, flags);
-        wake_up(&async_new);
+
+        /* schedule for execution */
+        queue_work(system_unbound_wq, &entry->work);
+
         return newcookie;
 }

@@ -311,87 +299,3 @@ void async_synchronize_cookie(async_cookie_t cookie)
         async_synchronize_cookie_domain(cookie, &async_running);
 }
 EXPORT_SYMBOL_GPL(async_synchronize_cookie);
-
-
-static int async_thread(void *unused)
-{
-        DECLARE_WAITQUEUE(wq, current);
-        add_wait_queue(&async_new, &wq);
-
-        while (!kthread_should_stop()) {
-                int ret = HZ;
-                set_current_state(TASK_INTERRUPTIBLE);
-                /*
-                 * check the list head without lock.. false positives
-                 * are dealt with inside run_one_entry() while holding
-                 * the lock.
-                 */
-                rmb();
-                if (!list_empty(&async_pending))
-                        run_one_entry();
-                else
-                        ret = schedule_timeout(HZ);
-
-                if (ret == 0) {
-                        /*
-                         * we timed out, this means we as thread are redundant.
-                         * we sign off and die, but we to avoid any races there
-                         * is a last-straw check to see if work snuck in.
-                         */
-                        atomic_dec(&thread_count);
-                        wmb(); /* manager must see our departure first */
-                        if (list_empty(&async_pending))
-                                break;
-                        /*
-                         * woops work came in between us timing out and us
-                         * signing off; we need to stay alive and keep working.
-                         */
-                        atomic_inc(&thread_count);
-                }
-        }
-        remove_wait_queue(&async_new, &wq);
-
-        return 0;
-}
-
-static int async_manager_thread(void *unused)
-{
-        DECLARE_WAITQUEUE(wq, current);
-        add_wait_queue(&async_new, &wq);
-
-        while (!kthread_should_stop()) {
-                int tc, ec;
-
-                set_current_state(TASK_INTERRUPTIBLE);
-
-                tc = atomic_read(&thread_count);
-                rmb();
-                ec = atomic_read(&entry_count);
-
-                while (tc < ec && tc < MAX_THREADS) {
-                        if (IS_ERR(kthread_run(async_thread, NULL, "async/%i",
-                                               tc))) {
-                                msleep(100);
-                                continue;
-                        }
-                        atomic_inc(&thread_count);
-                        tc++;
-                }
-
-                schedule();
-        }
-        remove_wait_queue(&async_new, &wq);
-
-        return 0;
-}
-
-static int __init async_init(void)
-{
-        async_enabled =
-                !IS_ERR(kthread_run(async_manager_thread, NULL, "async/mgr"));
-
-        WARN_ON(!async_enabled);
-        return 0;
-}
-
-core_initcall(async_init);
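
For orientation, a minimal and entirely hypothetical caller of the API whose backend this commit reroutes: async_schedule() still allocates an async_entry, but the entry is now executed by async_run_entry_fn() on system_unbound_wq rather than by the removed async_thread pool. The module and function names below are invented for illustration; only async_schedule(), async_synchronize_full() and the async_func_ptr callback signature come from include/linux/async.h as it stood at this commit.

#include <linux/async.h>
#include <linux/kernel.h>
#include <linux/module.h>

/* Matches async_func_ptr: void (*)(void *data, async_cookie_t cookie). */
static void example_async_work(void *data, async_cookie_t cookie)
{
        pr_info("async work '%s' ran, cookie %lli\n",
                (const char *)data, (long long)cookie);
}

static int __init example_init(void)
{
        /*
         * __async_schedule() builds an async_entry, does
         * INIT_WORK(&entry->work, async_run_entry_fn) and then
         * queue_work(system_unbound_wq, &entry->work).
         */
        async_schedule(example_async_work, (void *)"demo");

        /* Wait for every outstanding async entry to finish. */
        async_synchronize_full();
        return 0;
}

static void __exit example_exit(void)
{
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");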