diff options
author | Ingo Molnar <mingo@elte.hu> | 2009-03-26 13:49:48 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2009-03-26 13:49:48 -0400 |
commit | 18ffa418aead13c56515ac74cd26105102128aca (patch) | |
tree | 2096ea8db3b2594bd25ad39a70edc691219f669b /kernel/async.c | |
parent | ab76f3d771590d5c89faa3219559c5d3fc0ce0c2 (diff) | |
parent | 8e0ee43bc2c3e19db56a4adaa9a9b04ce885cd84 (diff) |
Merge commit 'v2.6.29' into x86/setup-lzma
Diffstat (limited to 'kernel/async.c')
-rw-r--r-- | kernel/async.c | 115 |
1 files changed, 93 insertions, 22 deletions
diff --git a/kernel/async.c b/kernel/async.c index f286e9f2b736..f565891f2c9b 100644 --- a/kernel/async.c +++ b/kernel/async.c | |||
@@ -54,6 +54,7 @@ asynchronous and synchronous parts of the kernel. | |||
54 | #include <linux/sched.h> | 54 | #include <linux/sched.h> |
55 | #include <linux/init.h> | 55 | #include <linux/init.h> |
56 | #include <linux/kthread.h> | 56 | #include <linux/kthread.h> |
57 | #include <linux/delay.h> | ||
57 | #include <asm/atomic.h> | 58 | #include <asm/atomic.h> |
58 | 59 | ||
59 | static async_cookie_t next_cookie = 1; | 60 | static async_cookie_t next_cookie = 1; |
@@ -90,12 +91,12 @@ extern int initcall_debug; | |||
90 | static async_cookie_t __lowest_in_progress(struct list_head *running) | 91 | static async_cookie_t __lowest_in_progress(struct list_head *running) |
91 | { | 92 | { |
92 | struct async_entry *entry; | 93 | struct async_entry *entry; |
93 | if (!list_empty(&async_pending)) { | 94 | if (!list_empty(running)) { |
94 | entry = list_first_entry(&async_pending, | 95 | entry = list_first_entry(running, |
95 | struct async_entry, list); | 96 | struct async_entry, list); |
96 | return entry->cookie; | 97 | return entry->cookie; |
97 | } else if (!list_empty(running)) { | 98 | } else if (!list_empty(&async_pending)) { |
98 | entry = list_first_entry(running, | 99 | entry = list_first_entry(&async_pending, |
99 | struct async_entry, list); | 100 | struct async_entry, list); |
100 | return entry->cookie; | 101 | return entry->cookie; |
101 | } else { | 102 | } else { |
@@ -104,6 +105,17 @@ static async_cookie_t __lowest_in_progress(struct list_head *running) | |||
104 | } | 105 | } |
105 | 106 | ||
106 | } | 107 | } |
108 | |||
109 | static async_cookie_t lowest_in_progress(struct list_head *running) | ||
110 | { | ||
111 | unsigned long flags; | ||
112 | async_cookie_t ret; | ||
113 | |||
114 | spin_lock_irqsave(&async_lock, flags); | ||
115 | ret = __lowest_in_progress(running); | ||
116 | spin_unlock_irqrestore(&async_lock, flags); | ||
117 | return ret; | ||
118 | } | ||
107 | /* | 119 | /* |
108 | * pick the first pending entry and run it | 120 | * pick the first pending entry and run it |
109 | */ | 121 | */ |
@@ -121,21 +133,23 @@ static void run_one_entry(void) | |||
121 | entry = list_first_entry(&async_pending, struct async_entry, list); | 133 | entry = list_first_entry(&async_pending, struct async_entry, list); |
122 | 134 | ||
123 | /* 2) move it to the running queue */ | 135 | /* 2) move it to the running queue */ |
124 | list_del(&entry->list); | 136 | list_move_tail(&entry->list, entry->running); |
125 | list_add_tail(&entry->list, &async_running); | ||
126 | spin_unlock_irqrestore(&async_lock, flags); | 137 | spin_unlock_irqrestore(&async_lock, flags); |
127 | 138 | ||
128 | /* 3) run it (and print duration)*/ | 139 | /* 3) run it (and print duration)*/ |
129 | if (initcall_debug && system_state == SYSTEM_BOOTING) { | 140 | if (initcall_debug && system_state == SYSTEM_BOOTING) { |
130 | printk("calling %lli_%pF @ %i\n", entry->cookie, entry->func, task_pid_nr(current)); | 141 | printk("calling %lli_%pF @ %i\n", (long long)entry->cookie, |
142 | entry->func, task_pid_nr(current)); | ||
131 | calltime = ktime_get(); | 143 | calltime = ktime_get(); |
132 | } | 144 | } |
133 | entry->func(entry->data, entry->cookie); | 145 | entry->func(entry->data, entry->cookie); |
134 | if (initcall_debug && system_state == SYSTEM_BOOTING) { | 146 | if (initcall_debug && system_state == SYSTEM_BOOTING) { |
135 | rettime = ktime_get(); | 147 | rettime = ktime_get(); |
136 | delta = ktime_sub(rettime, calltime); | 148 | delta = ktime_sub(rettime, calltime); |
137 | printk("initcall %lli_%pF returned 0 after %lld usecs\n", entry->cookie, | 149 | printk("initcall %lli_%pF returned 0 after %lld usecs\n", |
138 | entry->func, ktime_to_ns(delta) >> 10); | 150 | (long long)entry->cookie, |
151 | entry->func, | ||
152 | (long long)ktime_to_ns(delta) >> 10); | ||
139 | } | 153 | } |
140 | 154 | ||
141 | /* 4) remove it from the running queue */ | 155 | /* 4) remove it from the running queue */ |
@@ -194,18 +208,44 @@ static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct l | |||
194 | return newcookie; | 208 | return newcookie; |
195 | } | 209 | } |
196 | 210 | ||
211 | /** | ||
212 | * async_schedule - schedule a function for asynchronous execution | ||
213 | * @ptr: function to execute asynchronously | ||
214 | * @data: data pointer to pass to the function | ||
215 | * | ||
216 | * Returns an async_cookie_t that may be used for checkpointing later. | ||
217 | * Note: This function may be called from atomic or non-atomic contexts. | ||
218 | */ | ||
197 | async_cookie_t async_schedule(async_func_ptr *ptr, void *data) | 219 | async_cookie_t async_schedule(async_func_ptr *ptr, void *data) |
198 | { | 220 | { |
199 | return __async_schedule(ptr, data, &async_pending); | 221 | return __async_schedule(ptr, data, &async_running); |
200 | } | 222 | } |
201 | EXPORT_SYMBOL_GPL(async_schedule); | 223 | EXPORT_SYMBOL_GPL(async_schedule); |
202 | 224 | ||
203 | async_cookie_t async_schedule_special(async_func_ptr *ptr, void *data, struct list_head *running) | 225 | /** |
226 | * async_schedule_domain - schedule a function for asynchronous execution within a certain domain | ||
227 | * @ptr: function to execute asynchronously | ||
228 | * @data: data pointer to pass to the function | ||
229 | * @running: running list for the domain | ||
230 | * | ||
231 | * Returns an async_cookie_t that may be used for checkpointing later. | ||
232 | * @running may be used in the async_synchronize_*_domain() functions | ||
233 | * to wait within a certain synchronization domain rather than globally. | ||
234 | * A synchronization domain is specified via the running queue @running to use. | ||
235 | * Note: This function may be called from atomic or non-atomic contexts. | ||
236 | */ | ||
237 | async_cookie_t async_schedule_domain(async_func_ptr *ptr, void *data, | ||
238 | struct list_head *running) | ||
204 | { | 239 | { |
205 | return __async_schedule(ptr, data, running); | 240 | return __async_schedule(ptr, data, running); |
206 | } | 241 | } |
207 | EXPORT_SYMBOL_GPL(async_schedule_special); | 242 | EXPORT_SYMBOL_GPL(async_schedule_domain); |
208 | 243 | ||
244 | /** | ||
245 | * async_synchronize_full - synchronize all asynchronous function calls | ||
246 | * | ||
247 | * This function waits until all asynchronous function calls have been done. | ||
248 | */ | ||
209 | void async_synchronize_full(void) | 249 | void async_synchronize_full(void) |
210 | { | 250 | { |
211 | do { | 251 | do { |
@@ -214,13 +254,30 @@ void async_synchronize_full(void) | |||
214 | } | 254 | } |
215 | EXPORT_SYMBOL_GPL(async_synchronize_full); | 255 | EXPORT_SYMBOL_GPL(async_synchronize_full); |
216 | 256 | ||
217 | void async_synchronize_full_special(struct list_head *list) | 257 | /** |
258 | * async_synchronize_full_domain - synchronize all asynchronous function within a certain domain | ||
259 | * @list: running list to synchronize on | ||
260 | * | ||
261 | * This function waits until all asynchronous function calls for the | ||
262 | * synchronization domain specified by the running list @list have been done. | ||
263 | */ | ||
264 | void async_synchronize_full_domain(struct list_head *list) | ||
218 | { | 265 | { |
219 | async_synchronize_cookie_special(next_cookie, list); | 266 | async_synchronize_cookie_domain(next_cookie, list); |
220 | } | 267 | } |
221 | EXPORT_SYMBOL_GPL(async_synchronize_full_special); | 268 | EXPORT_SYMBOL_GPL(async_synchronize_full_domain); |
222 | 269 | ||
223 | void async_synchronize_cookie_special(async_cookie_t cookie, struct list_head *running) | 270 | /** |
271 | * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing | ||
272 | * @cookie: async_cookie_t to use as checkpoint | ||
273 | * @running: running list to synchronize on | ||
274 | * | ||
275 | * This function waits until all asynchronous function calls for the | ||
276 | * synchronization domain specified by the running list @running submitted | ||
277 | * prior to @cookie have been done. | ||
278 | */ | ||
279 | void async_synchronize_cookie_domain(async_cookie_t cookie, | ||
280 | struct list_head *running) | ||
224 | { | 281 | { |
225 | ktime_t starttime, delta, endtime; | 282 | ktime_t starttime, delta, endtime; |
226 | 283 | ||
@@ -229,21 +286,29 @@ void async_synchronize_cookie_special(async_cookie_t cookie, struct list_head *r | |||
229 | starttime = ktime_get(); | 286 | starttime = ktime_get(); |
230 | } | 287 | } |
231 | 288 | ||
232 | wait_event(async_done, __lowest_in_progress(running) >= cookie); | 289 | wait_event(async_done, lowest_in_progress(running) >= cookie); |
233 | 290 | ||
234 | if (initcall_debug && system_state == SYSTEM_BOOTING) { | 291 | if (initcall_debug && system_state == SYSTEM_BOOTING) { |
235 | endtime = ktime_get(); | 292 | endtime = ktime_get(); |
236 | delta = ktime_sub(endtime, starttime); | 293 | delta = ktime_sub(endtime, starttime); |
237 | 294 | ||
238 | printk("async_continuing @ %i after %lli usec\n", | 295 | printk("async_continuing @ %i after %lli usec\n", |
239 | task_pid_nr(current), ktime_to_ns(delta) >> 10); | 296 | task_pid_nr(current), |
297 | (long long)ktime_to_ns(delta) >> 10); | ||
240 | } | 298 | } |
241 | } | 299 | } |
242 | EXPORT_SYMBOL_GPL(async_synchronize_cookie_special); | 300 | EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain); |
243 | 301 | ||
302 | /** | ||
303 | * async_synchronize_cookie - synchronize asynchronous function calls with cookie checkpointing | ||
304 | * @cookie: async_cookie_t to use as checkpoint | ||
305 | * | ||
306 | * This function waits until all asynchronous function calls prior to @cookie | ||
307 | * have been done. | ||
308 | */ | ||
244 | void async_synchronize_cookie(async_cookie_t cookie) | 309 | void async_synchronize_cookie(async_cookie_t cookie) |
245 | { | 310 | { |
246 | async_synchronize_cookie_special(cookie, &async_running); | 311 | async_synchronize_cookie_domain(cookie, &async_running); |
247 | } | 312 | } |
248 | EXPORT_SYMBOL_GPL(async_synchronize_cookie); | 313 | EXPORT_SYMBOL_GPL(async_synchronize_cookie); |
249 | 314 | ||
@@ -304,7 +369,11 @@ static int async_manager_thread(void *unused) | |||
304 | ec = atomic_read(&entry_count); | 369 | ec = atomic_read(&entry_count); |
305 | 370 | ||
306 | while (tc < ec && tc < MAX_THREADS) { | 371 | while (tc < ec && tc < MAX_THREADS) { |
307 | kthread_run(async_thread, NULL, "async/%i", tc); | 372 | if (IS_ERR(kthread_run(async_thread, NULL, "async/%i", |
373 | tc))) { | ||
374 | msleep(100); | ||
375 | continue; | ||
376 | } | ||
308 | atomic_inc(&thread_count); | 377 | atomic_inc(&thread_count); |
309 | tc++; | 378 | tc++; |
310 | } | 379 | } |
@@ -319,7 +388,9 @@ static int async_manager_thread(void *unused) | |||
319 | static int __init async_init(void) | 388 | static int __init async_init(void) |
320 | { | 389 | { |
321 | if (async_enabled) | 390 | if (async_enabled) |
322 | kthread_run(async_manager_thread, NULL, "async/mgr"); | 391 | if (IS_ERR(kthread_run(async_manager_thread, NULL, |
392 | "async/mgr"))) | ||
393 | async_enabled = 0; | ||
323 | return 0; | 394 | return 0; |
324 | } | 395 | } |
325 | 396 | ||