Diffstat (limited to 'kernel/async.c')
-rw-r--r--  kernel/async.c  323
1 file changed, 323 insertions(+), 0 deletions(-)
diff --git a/kernel/async.c b/kernel/async.c
new file mode 100644
index 000000000000..64cc916299a5
--- /dev/null
+++ b/kernel/async.c
@@ -0,0 +1,323 @@
/*
 * async.c: Asynchronous function calls for boot performance
 *
 * (C) Copyright 2009 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */


/*

Goals and Theory of Operation

The primary goal of this feature is to reduce kernel boot time by running
the various independent hardware delay and discovery operations decoupled
from each other, rather than strictly serialized.

More specifically, the asynchronous function call concept allows certain
operations (primarily during system boot) to happen asynchronously and out
of order, while the externally visible parts of these operations still
happen sequentially and in order (not unlike how out-of-order CPUs retire
their instructions in order).

Key to the asynchronous function call implementation is the concept of
a "sequence cookie" (which, although it has an abstracted type, can be
thought of as a monotonically incrementing number).

The async core assigns each scheduled event such a sequence cookie and
passes it to the called function.

Before performing a globally visible operation, such as registering device
numbers, an asynchronously called function should call the
async_synchronize_cookie() function and pass in its own cookie. The
async_synchronize_cookie() function will make sure that all asynchronous
operations that were scheduled prior to the operation corresponding with
the cookie have completed.

Subsystem/driver initialization code that scheduled asynchronous probe
functions, but which shares global resources with other drivers/subsystems
that do not use the asynchronous call feature, needs to do a full
synchronization with the async_synchronize_full() function before returning
from its init function. This is to maintain strict ordering between the
asynchronous and synchronous parts of the kernel.

*/
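
/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * an asynchronously executed probe function uses its cookie to keep the
 * externally visible step in order:
 *
 *	static void example_probe_one(void *data, async_cookie_t cookie)
 *	{
 *		struct example_dev *dev = data;
 *
 *		example_slow_hw_init(dev);		(may run out of order)
 *
 *		async_synchronize_cookie(cookie);	(wait for earlier work)
 *		example_register_dev(dev);		(strictly in order)
 *	}
 *
 *	cookie = async_schedule(example_probe_one, dev);
 */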

#include <linux/async.h>
#include <linux/module.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/slab.h>		/* for kzalloc()/kfree() */
#include <linux/ktime.h>	/* for the initcall_debug timing */
#include <asm/atomic.h>

static async_cookie_t next_cookie = 1;

#define MAX_THREADS	256
#define MAX_WORK	32768

static LIST_HEAD(async_pending);
static LIST_HEAD(async_running);
static DEFINE_SPINLOCK(async_lock);

struct async_entry {
	struct list_head list;		/* entry on the pending/running list */
	async_cookie_t	cookie;		/* sequence cookie of this entry */
	async_func_ptr	*func;		/* function to call */
	void		*data;		/* argument passed to @func */
	struct list_head *running;	/* running list this entry moves to */
};

static DECLARE_WAIT_QUEUE_HEAD(async_done);
static DECLARE_WAIT_QUEUE_HEAD(async_new);

static atomic_t entry_count;
static atomic_t thread_count;

extern int initcall_debug;	/* defined in init/main.c */


/*
 * Return the cookie of the oldest entry still pending or running, or
 * next_cookie ("infinity") if nothing is in flight. Entries are taken
 * off the head of the pending list in cookie order and appended to a
 * running list, so a running entry always has a lower cookie than any
 * pending entry; the running list must therefore be checked first.
 *
 * MUST be called with the lock held!
 */
static async_cookie_t __lowest_in_progress(struct list_head *running)
{
	struct async_entry *entry;
	if (!list_empty(running)) {
		entry = list_first_entry(running,
			struct async_entry, list);
		return entry->cookie;
	} else if (!list_empty(&async_pending)) {
		entry = list_first_entry(&async_pending,
			struct async_entry, list);
		return entry->cookie;
	} else {
		/* nothing in progress... next_cookie is "infinity" */
		return next_cookie;
	}
}

/*
 * pick the first pending entry and run it
 */
static void run_one_entry(void)
{
	unsigned long flags;
	struct async_entry *entry;
	ktime_t calltime, delta, rettime;

	/* 1) pick one task from the pending queue */

	spin_lock_irqsave(&async_lock, flags);
	if (list_empty(&async_pending))
		goto out;
	entry = list_first_entry(&async_pending, struct async_entry, list);

	/* 2) move it to its running queue */
	list_del(&entry->list);
	list_add_tail(&entry->list, entry->running);
	spin_unlock_irqrestore(&async_lock, flags);

	/* 3) run it (and print duration) */
	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		printk("calling %lli_%pF @ %i\n", (long long)entry->cookie,
			entry->func, task_pid_nr(current));
		calltime = ktime_get();
	}
	entry->func(entry->data, entry->cookie);
	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		printk("initcall %lli_%pF returned 0 after %lld usecs\n",
			(long long)entry->cookie, entry->func,
			(long long)ktime_to_ns(delta) >> 10);
	}

	/* 4) remove it from the running queue */
	spin_lock_irqsave(&async_lock, flags);
	list_del(&entry->list);

	/* 5) free the entry */
	kfree(entry);
	atomic_dec(&entry_count);

	spin_unlock_irqrestore(&async_lock, flags);

	/* 6) wake up any waiters. */
	wake_up(&async_done);
	return;

out:
	spin_unlock_irqrestore(&async_lock, flags);
}


static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct list_head *running)
{
	struct async_entry *entry;
	unsigned long flags;
	async_cookie_t newcookie;

	/* allow irq-off callers */
	entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);

	/*
	 * If we're out of memory or if there's too much work
	 * pending already, we execute synchronously.
	 */
	if (!entry || atomic_read(&entry_count) > MAX_WORK) {
		kfree(entry);
		spin_lock_irqsave(&async_lock, flags);
		newcookie = next_cookie++;
		spin_unlock_irqrestore(&async_lock, flags);

		/* low on memory.. run synchronously */
		ptr(data, newcookie);
		return newcookie;
	}
	entry->func = ptr;
	entry->data = data;
	entry->running = running;

	spin_lock_irqsave(&async_lock, flags);
	newcookie = entry->cookie = next_cookie++;
	list_add_tail(&entry->list, &async_pending);
	atomic_inc(&entry_count);
	spin_unlock_irqrestore(&async_lock, flags);
	wake_up(&async_new);
	return newcookie;
}

async_cookie_t async_schedule(async_func_ptr *ptr, void *data)
{
	/* default domain: entries run on the global async_running list */
	return __async_schedule(ptr, data, &async_running);
}
EXPORT_SYMBOL_GPL(async_schedule);

async_cookie_t async_schedule_special(async_func_ptr *ptr, void *data, struct list_head *running)
{
	return __async_schedule(ptr, data, running);
}
EXPORT_SYMBOL_GPL(async_schedule_special);
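
/*
 * Illustrative sketch (hypothetical subsystem, not part of this file):
 * a caller that only needs to synchronize against its own asynchronous
 * work keeps a private running list and uses the _special variants:
 *
 *	static LIST_HEAD(example_async_running);
 *
 *	async_schedule_special(example_probe_one, dev, &example_async_running);
 *	...
 *	async_synchronize_full_special(&example_async_running);
 */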

void async_synchronize_full(void)
{
	do {
		async_synchronize_cookie(next_cookie);
	} while (!list_empty(&async_running) || !list_empty(&async_pending));
}
EXPORT_SYMBOL_GPL(async_synchronize_full);
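
/*
 * Illustrative sketch (hypothetical, not part of this file): an init
 * function that scheduled asynchronous probes but shares global state
 * with synchronous code drains all async work before returning:
 *
 *	static int __init example_init(void)
 *	{
 *		int i;
 *
 *		for (i = 0; i < EXAMPLE_NDEVS; i++)
 *			async_schedule(example_probe_one, &example_devs[i]);
 *
 *		async_synchronize_full();
 *		return 0;
 *	}
 */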

void async_synchronize_full_special(struct list_head *list)
{
	async_synchronize_cookie_special(next_cookie, list);
}
EXPORT_SYMBOL_GPL(async_synchronize_full_special);

void async_synchronize_cookie_special(async_cookie_t cookie, struct list_head *running)
{
	ktime_t starttime, delta, endtime;

	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		printk("async_waiting @ %i\n", task_pid_nr(current));
		starttime = ktime_get();
	}

	wait_event(async_done, __lowest_in_progress(running) >= cookie);

	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		endtime = ktime_get();
		delta = ktime_sub(endtime, starttime);

		printk("async_continuing @ %i after %lli usec\n",
			task_pid_nr(current),
			(long long)ktime_to_ns(delta) >> 10);
	}
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie_special);

void async_synchronize_cookie(async_cookie_t cookie)
{
	async_synchronize_cookie_special(cookie, &async_running);
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie);


static int async_thread(void *unused)
{
	DECLARE_WAITQUEUE(wq, current);
	add_wait_queue(&async_new, &wq);

	while (!kthread_should_stop()) {
		int ret = HZ;
		set_current_state(TASK_INTERRUPTIBLE);
		/*
		 * check the list head without lock.. false positives
		 * are dealt with inside run_one_entry() while holding
		 * the lock.
		 */
		rmb();
		if (!list_empty(&async_pending))
			run_one_entry();
		else
			ret = schedule_timeout(HZ);

		if (ret == 0) {
			/*
			 * we timed out, which means this thread is
			 * redundant. we sign off and die, but to avoid
			 * any races there is a last-straw check to see
			 * if work snuck in.
			 */
			atomic_dec(&thread_count);
			wmb(); /* manager must see our departure first */
			if (list_empty(&async_pending))
				break;
			/*
			 * whoops, work came in between us timing out
			 * and us signing off; we need to stay alive
			 * and keep working.
			 */
			atomic_inc(&thread_count);
		}
	}
	remove_wait_queue(&async_new, &wq);

	return 0;
}

static int async_manager_thread(void *unused)
{
	DECLARE_WAITQUEUE(wq, current);
	add_wait_queue(&async_new, &wq);

	while (!kthread_should_stop()) {
		int tc, ec;

		set_current_state(TASK_INTERRUPTIBLE);

		tc = atomic_read(&thread_count);
		rmb();
		ec = atomic_read(&entry_count);

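		/*
		 * Spawn additional worker threads until there is one
		 * thread per pending entry, capped at MAX_THREADS.
		 */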
		while (tc < ec && tc < MAX_THREADS) {
			kthread_run(async_thread, NULL, "async/%i", tc);
			atomic_inc(&thread_count);
			tc++;
		}

		schedule();
	}
	remove_wait_queue(&async_new, &wq);

	return 0;
}

static int __init async_init(void)
{
	kthread_run(async_manager_thread, NULL, "async/mgr");
	return 0;
}

core_initcall(async_init);