 block/elevator.c         |  35
 include/linux/async.h    |   9
 include/linux/elevator.h |   5
 include/linux/init.h     |   1
 init/do_mounts_initrd.c  |   3
 init/initramfs.c         |   8
 init/main.c              |  16
 kernel/async.c           | 153
 kernel/kmod.c            |   9
 9 files changed, 130 insertions(+), 109 deletions(-)
diff --git a/block/elevator.c b/block/elevator.c
index 9edba1b8323e..603b2c178740 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -100,14 +100,14 @@ static void elevator_put(struct elevator_type *e)
 	module_put(e->elevator_owner);
 }
 
-static struct elevator_type *elevator_get(const char *name)
+static struct elevator_type *elevator_get(const char *name, bool try_loading)
 {
 	struct elevator_type *e;
 
 	spin_lock(&elv_list_lock);
 
 	e = elevator_find(name);
-	if (!e) {
+	if (!e && try_loading) {
 		spin_unlock(&elv_list_lock);
 		request_module("%s-iosched", name);
 		spin_lock(&elv_list_lock);
@@ -136,6 +136,22 @@ static int __init elevator_setup(char *str)
 
 __setup("elevator=", elevator_setup);
 
+/* called during boot to load the elevator chosen by the elevator param */
+void __init load_default_elevator_module(void)
+{
+	struct elevator_type *e;
+
+	if (!chosen_elevator[0])
+		return;
+
+	spin_lock(&elv_list_lock);
+	e = elevator_find(chosen_elevator);
+	spin_unlock(&elv_list_lock);
+
+	if (!e)
+		request_module("%s-iosched", chosen_elevator);
+}
+
 static struct kobj_type elv_ktype;
 
 static struct elevator_queue *elevator_alloc(struct request_queue *q,
@@ -191,25 +207,30 @@ int elevator_init(struct request_queue *q, char *name)
 	q->boundary_rq = NULL;
 
 	if (name) {
-		e = elevator_get(name);
+		e = elevator_get(name, true);
 		if (!e)
 			return -EINVAL;
 	}
 
+	/*
+	 * Use the default elevator specified by config boot param or
+	 * config option. Don't try to load modules as we could be running
+	 * off async and request_module() isn't allowed from async.
+	 */
 	if (!e && *chosen_elevator) {
-		e = elevator_get(chosen_elevator);
+		e = elevator_get(chosen_elevator, false);
 		if (!e)
 			printk(KERN_ERR "I/O scheduler %s not found\n",
 							chosen_elevator);
 	}
 
 	if (!e) {
-		e = elevator_get(CONFIG_DEFAULT_IOSCHED);
+		e = elevator_get(CONFIG_DEFAULT_IOSCHED, false);
 		if (!e) {
 			printk(KERN_ERR
 				"Default I/O scheduler not found. " \
 					"Using noop.\n");
-			e = elevator_get("noop");
+			e = elevator_get("noop", false);
 		}
 	}
 
@@ -951,7 +972,7 @@ int elevator_change(struct request_queue *q, const char *name)
 		return -ENXIO;
 
 	strlcpy(elevator_name, name, sizeof(elevator_name));
-	e = elevator_get(strstrip(elevator_name));
+	e = elevator_get(strstrip(elevator_name), true);
 	if (!e) {
 		printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
 		return -EINVAL;
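
The block/elevator.c changes split elevator_get() lookups into loading and non-loading variants so that nothing on the (possibly async) elevator_init() path calls request_module(). A hedged sketch of the resulting boot ordering, reconstructed from the hunks above (the timeline itself is illustrative; the function names are real):

/*
 * Illustrative ordering after this series:
 *
 *   populate_rootfs() / handle_initrd()
 *     -> load_default_modules()
 *          -> load_default_elevator_module()   request_module() is fine
 *                                              here: process context
 *   ... async device probing ...
 *     -> elevator_init(q, NULL)
 *          -> elevator_get(chosen_elevator, false)   lookup only; the
 *                                                    module is already
 *                                                    resident
 */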
diff --git a/include/linux/async.h b/include/linux/async.h
index 345169cfa304..a2e3f18b2ad6 100644
--- a/include/linux/async.h
+++ b/include/linux/async.h
@@ -19,8 +19,7 @@ typedef u64 async_cookie_t;
 typedef void (async_func_ptr) (void *data, async_cookie_t cookie);
 struct async_domain {
 	struct list_head node;
-	struct list_head domain;
-	int count;
+	struct list_head pending;
 	unsigned registered:1;
 };
 
@@ -29,8 +28,7 @@ struct async_domain {
  */
 #define ASYNC_DOMAIN(_name) \
 	struct async_domain _name = { .node = LIST_HEAD_INIT(_name.node), \
-				      .domain = LIST_HEAD_INIT(_name.domain), \
-				      .count = 0, \
+				      .pending = LIST_HEAD_INIT(_name.pending), \
 				      .registered = 1 }
 
 /*
@@ -39,8 +37,7 @@ struct async_domain {
  */
 #define ASYNC_DOMAIN_EXCLUSIVE(_name) \
 	struct async_domain _name = { .node = LIST_HEAD_INIT(_name.node), \
-				      .domain = LIST_HEAD_INIT(_name.domain), \
-				      .count = 0, \
+				      .pending = LIST_HEAD_INIT(_name.pending), \
 				      .registered = 0 }
 
 extern async_cookie_t async_schedule(async_func_ptr *ptr, void *data);
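
For orientation, a minimal consumer of the reworked struct async_domain; my_probe_domain, my_probe_one and my_driver_init are hypothetical names, while ASYNC_DOMAIN(), async_schedule_domain() and async_synchronize_full_domain() are the interfaces declared in this header:

#include <linux/async.h>

static ASYNC_DOMAIN(my_probe_domain);	/* hypothetical driver-local domain */

static void my_probe_one(void *data, async_cookie_t cookie)
{
	/* slow, parallelizable probe work runs here */
}

static int __init my_driver_init(void)
{
	async_schedule_domain(my_probe_one, NULL, &my_probe_domain);
	/* wait only for this driver's work, not all global async work */
	async_synchronize_full_domain(&my_probe_domain);
	return 0;
}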
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index c03af7687bb4..186620631750 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -138,6 +138,7 @@ extern void elv_drain_elevator(struct request_queue *);
 /*
  * io scheduler registration
  */
+extern void __init load_default_elevator_module(void);
 extern int elv_register(struct elevator_type *);
 extern void elv_unregister(struct elevator_type *);
 
@@ -206,5 +207,9 @@ enum {
 		INIT_LIST_HEAD(&(rq)->csd.list);	\
 	} while (0)
 
+#else /* CONFIG_BLOCK */
+
+static inline void load_default_elevator_module(void) { }
+
 #endif /* CONFIG_BLOCK */
 #endif
diff --git a/include/linux/init.h b/include/linux/init.h
index 10ed4f436458..861814710d52 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -153,6 +153,7 @@ extern unsigned int reset_devices;
 /* used by init/main.c */
 void setup_arch(char **);
 void prepare_namespace(void);
+void __init load_default_modules(void);
 
 extern void (*late_time_init)(void);
 
diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
index f9acf71b9810..a32ec1ce882b 100644
--- a/init/do_mounts_initrd.c
+++ b/init/do_mounts_initrd.c
@@ -61,6 +61,9 @@ static void __init handle_initrd(void)
 	sys_mkdir("/old", 0700);
 	sys_chdir("/old");
 
+	/* try loading default modules from initrd */
+	load_default_modules();
+
 	/*
 	 * In case that a resume from disk is carried out by linuxrc or one of
 	 * its children, we need to tell the freezer not to wait for us.
diff --git a/init/initramfs.c b/init/initramfs.c
index 84c6bf111300..a67ef9dbda9d 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -592,7 +592,7 @@ static int __init populate_rootfs(void)
 			initrd_end - initrd_start);
 		if (!err) {
 			free_initrd();
-			return 0;
+			goto done;
 		} else {
 			clean_rootfs();
 			unpack_to_rootfs(__initramfs_start, __initramfs_size);
@@ -607,6 +607,7 @@ static int __init populate_rootfs(void)
 			sys_close(fd);
 			free_initrd();
 		}
+ done:
 #else
 		printk(KERN_INFO "Unpacking initramfs...\n");
 		err = unpack_to_rootfs((char *)initrd_start,
@@ -615,6 +616,11 @@ static int __init populate_rootfs(void)
 			printk(KERN_EMERG "Initramfs unpacking failed: %s\n", err);
 		free_initrd();
 #endif
+		/*
+		 * Try loading default modules from initramfs.  This gives
+		 * us a chance to load before device_initcalls.
+		 */
+		load_default_modules();
 	}
 	return 0;
 }
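
Note the return 0 to goto done change: with load_default_modules() now sitting at the tail of populate_rootfs(), the early-exit path for a successfully unpacked external initrd has to fall through to it instead of returning. Roughly, as a simplified sketch (the real code interleaves this with #ifdef CONFIG_BLK_DEV_RAM):

static int __init populate_rootfs(void)	/* simplified sketch */
{
	/* ... unpack the built-in initramfs, then the external one ... */
	if (initrd_start) {
		char *err = unpack_to_rootfs((char *)initrd_start,
					     initrd_end - initrd_start);
		if (!err) {
			free_initrd();
			goto done;	/* was: return 0 */
		}
		/* ... fallback unpack/copy-to-/initrd.image paths ... */
done:
		load_default_modules();	/* new: before device_initcalls */
	}
	return 0;
}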
diff --git a/init/main.c b/init/main.c
index cee4b5c66d81..63534a141b4e 100644
--- a/init/main.c
+++ b/init/main.c
@@ -70,6 +70,8 @@
 #include <linux/perf_event.h>
 #include <linux/file.h>
 #include <linux/ptrace.h>
+#include <linux/blkdev.h>
+#include <linux/elevator.h>
 
 #include <asm/io.h>
 #include <asm/bugs.h>
@@ -794,6 +796,17 @@ static void __init do_pre_smp_initcalls(void)
 		do_one_initcall(*fn);
 }
 
+/*
+ * This function requests modules which should be loaded by default and is
+ * called twice right after initrd is mounted and right before init is
+ * exec'd. If such modules are on either initrd or rootfs, they will be
+ * loaded before control is passed to userland.
+ */
+void __init load_default_modules(void)
+{
+	load_default_elevator_module();
+}
+
 static int run_init_process(const char *init_filename)
 {
 	argv_init[0] = init_filename;
@@ -900,4 +913,7 @@ static noinline void __init kernel_init_freeable(void)
 	 * we're essentially up and running. Get rid of the
 	 * initmem segments and start the user-mode stuff..
 	 */
+
+	/* rootfs is available now, try loading default modules */
+	load_default_modules();
 }
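
Since request_module() for an already-resident module is close to a no-op, calling load_default_modules() from several early paths is cheap; each call is simply another chance to load before device_initcalls run. The call sites added by this series, for reference:

/*
 * 1. populate_rootfs()      - after the initramfs is unpacked
 * 2. handle_initrd()        - after a legacy initrd is mounted
 * 3. kernel_init_freeable() - right before init is exec'd
 */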
diff --git a/kernel/async.c b/kernel/async.c
index 6c68fc3fae7b..8ddee2c3e5b0 100644
--- a/kernel/async.c
+++ b/kernel/async.c
@@ -61,63 +61,48 @@ asynchronous and synchronous parts of the kernel.
 
 static async_cookie_t next_cookie = 1;
 
 #define MAX_WORK		32768
+#define ASYNC_COOKIE_MAX	ULLONG_MAX	/* infinity cookie */
 
-static LIST_HEAD(async_pending);
-static ASYNC_DOMAIN(async_running);
-static LIST_HEAD(async_domains);
+static LIST_HEAD(async_global_pending);	/* pending from all registered doms */
+static ASYNC_DOMAIN(async_dfl_domain);
 static DEFINE_SPINLOCK(async_lock);
-static DEFINE_MUTEX(async_register_mutex);
 
 struct async_entry {
-	struct list_head	list;
+	struct list_head	domain_list;
+	struct list_head	global_list;
 	struct work_struct	work;
 	async_cookie_t		cookie;
 	async_func_ptr		*func;
 	void			*data;
-	struct async_domain	*running;
+	struct async_domain	*domain;
 };
 
 static DECLARE_WAIT_QUEUE_HEAD(async_done);
 
 static atomic_t entry_count;
 
-
-/*
- * MUST be called with the lock held!
- */
-static async_cookie_t __lowest_in_progress(struct async_domain *running)
+static async_cookie_t lowest_in_progress(struct async_domain *domain)
 {
-	async_cookie_t first_running = next_cookie;	/* infinity value */
-	async_cookie_t first_pending = next_cookie;	/* ditto */
-	struct async_entry *entry;
+	struct async_entry *first = NULL;
+	async_cookie_t ret = ASYNC_COOKIE_MAX;
+	unsigned long flags;
 
-	/*
-	 * Both running and pending lists are sorted but not disjoint.
-	 * Take the first cookies from both and return the min.
-	 */
-	if (!list_empty(&running->domain)) {
-		entry = list_first_entry(&running->domain, typeof(*entry), list);
-		first_running = entry->cookie;
-	}
+	spin_lock_irqsave(&async_lock, flags);
 
-	list_for_each_entry(entry, &async_pending, list) {
-		if (entry->running == running) {
-			first_pending = entry->cookie;
-			break;
-		}
+	if (domain) {
+		if (!list_empty(&domain->pending))
+			first = list_first_entry(&domain->pending,
+					struct async_entry, domain_list);
+	} else {
+		if (!list_empty(&async_global_pending))
+			first = list_first_entry(&async_global_pending,
+					struct async_entry, global_list);
 	}
 
-	return min(first_running, first_pending);
-}
+	if (first)
+		ret = first->cookie;
 
-static async_cookie_t lowest_in_progress(struct async_domain *running)
-{
-	unsigned long flags;
-	async_cookie_t ret;
-
-	spin_lock_irqsave(&async_lock, flags);
-	ret = __lowest_in_progress(running);
 	spin_unlock_irqrestore(&async_lock, flags);
 	return ret;
 }
@@ -129,20 +114,10 @@ static void async_run_entry_fn(struct work_struct *work)
 {
 	struct async_entry *entry =
 		container_of(work, struct async_entry, work);
-	struct async_entry *pos;
 	unsigned long flags;
 	ktime_t uninitialized_var(calltime), delta, rettime;
-	struct async_domain *running = entry->running;
-
-	/* 1) move self to the running queue, make sure it stays sorted */
-	spin_lock_irqsave(&async_lock, flags);
-	list_for_each_entry_reverse(pos, &running->domain, list)
-		if (entry->cookie < pos->cookie)
-			break;
-	list_move_tail(&entry->list, &pos->list);
-	spin_unlock_irqrestore(&async_lock, flags);
 
-	/* 2) run (and print duration) */
+	/* 1) run (and print duration) */
 	if (initcall_debug && system_state == SYSTEM_BOOTING) {
 		printk(KERN_DEBUG "calling %lli_%pF @ %i\n",
 			(long long)entry->cookie,
@@ -159,23 +134,22 @@ static void async_run_entry_fn(struct work_struct *work)
 			(long long)ktime_to_ns(delta) >> 10);
 	}
 
-	/* 3) remove self from the running queue */
+	/* 2) remove self from the pending queues */
 	spin_lock_irqsave(&async_lock, flags);
-	list_del(&entry->list);
-	if (running->registered && --running->count == 0)
-		list_del_init(&running->node);
+	list_del_init(&entry->domain_list);
+	list_del_init(&entry->global_list);
 
-	/* 4) free the entry */
+	/* 3) free the entry */
 	kfree(entry);
 	atomic_dec(&entry_count);
 
 	spin_unlock_irqrestore(&async_lock, flags);
 
-	/* 5) wake up any waiters */
+	/* 4) wake up any waiters */
 	wake_up(&async_done);
 }
 
-static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct async_domain *running)
+static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct async_domain *domain)
 {
 	struct async_entry *entry;
 	unsigned long flags;
@@ -198,16 +172,22 @@ static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct a
 		ptr(data, newcookie);
 		return newcookie;
 	}
+	INIT_LIST_HEAD(&entry->domain_list);
+	INIT_LIST_HEAD(&entry->global_list);
 	INIT_WORK(&entry->work, async_run_entry_fn);
 	entry->func = ptr;
 	entry->data = data;
-	entry->running = running;
+	entry->domain = domain;
 
 	spin_lock_irqsave(&async_lock, flags);
+
+	/* allocate cookie and queue */
 	newcookie = entry->cookie = next_cookie++;
-	list_add_tail(&entry->list, &async_pending);
-	if (running->registered && running->count++ == 0)
-		list_add_tail(&running->node, &async_domains);
+
+	list_add_tail(&entry->domain_list, &domain->pending);
+	if (domain->registered)
+		list_add_tail(&entry->global_list, &async_global_pending);
+
 	atomic_inc(&entry_count);
 	spin_unlock_irqrestore(&async_lock, flags);
 
@@ -230,7 +210,7 @@ static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct a
  */
 async_cookie_t async_schedule(async_func_ptr *ptr, void *data)
 {
-	return __async_schedule(ptr, data, &async_running);
+	return __async_schedule(ptr, data, &async_dfl_domain);
 }
 EXPORT_SYMBOL_GPL(async_schedule);
 
@@ -238,18 +218,18 @@ EXPORT_SYMBOL_GPL(async_schedule);
  * async_schedule_domain - schedule a function for asynchronous execution within a certain domain
  * @ptr: function to execute asynchronously
  * @data: data pointer to pass to the function
- * @running: running list for the domain
+ * @domain: the domain
  *
  * Returns an async_cookie_t that may be used for checkpointing later.
- * @running may be used in the async_synchronize_*_domain() functions
- * to wait within a certain synchronization domain rather than globally.
- * A synchronization domain is specified via the running queue @running to use.
- * Note: This function may be called from atomic or non-atomic contexts.
+ * @domain may be used in the async_synchronize_*_domain() functions to
+ * wait within a certain synchronization domain rather than globally. A
+ * synchronization domain is specified via @domain. Note: This function
+ * may be called from atomic or non-atomic contexts.
  */
 async_cookie_t async_schedule_domain(async_func_ptr *ptr, void *data,
-				     struct async_domain *running)
+				     struct async_domain *domain)
 {
-	return __async_schedule(ptr, data, running);
+	return __async_schedule(ptr, data, domain);
 }
 EXPORT_SYMBOL_GPL(async_schedule_domain);
 
@@ -260,18 +240,7 @@ EXPORT_SYMBOL_GPL(async_schedule_domain);
  */
 void async_synchronize_full(void)
 {
-	mutex_lock(&async_register_mutex);
-	do {
-		struct async_domain *domain = NULL;
-
-		spin_lock_irq(&async_lock);
-		if (!list_empty(&async_domains))
-			domain = list_first_entry(&async_domains, typeof(*domain), node);
-		spin_unlock_irq(&async_lock);
-
-		async_synchronize_cookie_domain(next_cookie, domain);
-	} while (!list_empty(&async_domains));
-	mutex_unlock(&async_register_mutex);
+	async_synchronize_full_domain(NULL);
 }
 EXPORT_SYMBOL_GPL(async_synchronize_full);
 
277 246
@@ -286,51 +255,45 @@ EXPORT_SYMBOL_GPL(async_synchronize_full);
286 */ 255 */
287void async_unregister_domain(struct async_domain *domain) 256void async_unregister_domain(struct async_domain *domain)
288{ 257{
289 mutex_lock(&async_register_mutex);
290 spin_lock_irq(&async_lock); 258 spin_lock_irq(&async_lock);
291 WARN_ON(!domain->registered || !list_empty(&domain->node) || 259 WARN_ON(!domain->registered || !list_empty(&domain->pending));
292 !list_empty(&domain->domain));
293 domain->registered = 0; 260 domain->registered = 0;
294 spin_unlock_irq(&async_lock); 261 spin_unlock_irq(&async_lock);
295 mutex_unlock(&async_register_mutex);
296} 262}
297EXPORT_SYMBOL_GPL(async_unregister_domain); 263EXPORT_SYMBOL_GPL(async_unregister_domain);
298 264
299/** 265/**
300 * async_synchronize_full_domain - synchronize all asynchronous function within a certain domain 266 * async_synchronize_full_domain - synchronize all asynchronous function within a certain domain
301 * @domain: running list to synchronize on 267 * @domain: the domain to synchronize
302 * 268 *
303 * This function waits until all asynchronous function calls for the 269 * This function waits until all asynchronous function calls for the
304 * synchronization domain specified by the running list @domain have been done. 270 * synchronization domain specified by @domain have been done.
305 */ 271 */
306void async_synchronize_full_domain(struct async_domain *domain) 272void async_synchronize_full_domain(struct async_domain *domain)
307{ 273{
308 async_synchronize_cookie_domain(next_cookie, domain); 274 async_synchronize_cookie_domain(ASYNC_COOKIE_MAX, domain);
309} 275}
310EXPORT_SYMBOL_GPL(async_synchronize_full_domain); 276EXPORT_SYMBOL_GPL(async_synchronize_full_domain);
311 277
312/** 278/**
313 * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing 279 * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
314 * @cookie: async_cookie_t to use as checkpoint 280 * @cookie: async_cookie_t to use as checkpoint
315 * @running: running list to synchronize on 281 * @domain: the domain to synchronize (%NULL for all registered domains)
316 * 282 *
317 * This function waits until all asynchronous function calls for the 283 * This function waits until all asynchronous function calls for the
318 * synchronization domain specified by running list @running submitted 284 * synchronization domain specified by @domain submitted prior to @cookie
319 * prior to @cookie have been done. 285 * have been done.
320 */ 286 */
321void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain *running) 287void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain *domain)
322{ 288{
323 ktime_t uninitialized_var(starttime), delta, endtime; 289 ktime_t uninitialized_var(starttime), delta, endtime;
324 290
325 if (!running)
326 return;
327
328 if (initcall_debug && system_state == SYSTEM_BOOTING) { 291 if (initcall_debug && system_state == SYSTEM_BOOTING) {
329 printk(KERN_DEBUG "async_waiting @ %i\n", task_pid_nr(current)); 292 printk(KERN_DEBUG "async_waiting @ %i\n", task_pid_nr(current));
330 starttime = ktime_get(); 293 starttime = ktime_get();
331 } 294 }
332 295
333 wait_event(async_done, lowest_in_progress(running) >= cookie); 296 wait_event(async_done, lowest_in_progress(domain) >= cookie);
334 297
335 if (initcall_debug && system_state == SYSTEM_BOOTING) { 298 if (initcall_debug && system_state == SYSTEM_BOOTING) {
336 endtime = ktime_get(); 299 endtime = ktime_get();
@@ -352,7 +315,7 @@ EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain);
  */
 void async_synchronize_cookie(async_cookie_t cookie)
 {
-	async_synchronize_cookie_domain(cookie, &async_running);
+	async_synchronize_cookie_domain(cookie, &async_dfl_domain);
 }
 EXPORT_SYMBOL_GPL(async_synchronize_cookie);
 
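
One consequence of the new global list worth spelling out: entries scheduled on an ASYNC_DOMAIN_EXCLUSIVE() domain have registered == 0, so __async_schedule() never links them onto async_global_pending, and async_synchronize_full(), which is now just async_synchronize_full_domain(NULL), will not wait for them. A hedged sketch (fw_domain and slow_work are hypothetical):

static ASYNC_DOMAIN_EXCLUSIVE(fw_domain);	/* hypothetical */

static void slow_work(void *data, async_cookie_t cookie)
{
	/* long-running work, invisible to global synchronization */
}

static void example(void)
{
	async_schedule_domain(slow_work, NULL, &fw_domain);
	async_synchronize_full();		/* does NOT wait for fw_domain */
	async_synchronize_full_domain(&fw_domain);	/* this waits */
}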
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 0023a87e8de6..56dd34976d7b 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -38,6 +38,7 @@
 #include <linux/suspend.h>
 #include <linux/rwsem.h>
 #include <linux/ptrace.h>
+#include <linux/async.h>
 #include <asm/uaccess.h>
 
 #include <trace/events/module.h>
@@ -130,6 +131,14 @@ int __request_module(bool wait, const char *fmt, ...)
 #define MAX_KMOD_CONCURRENT 50	/* Completely arbitrary value - KAO */
 	static int kmod_loop_msg;
 
+	/*
+	 * We don't allow synchronous module loading from async. Module
+	 * init may invoke async_synchronize_full() which will end up
+	 * waiting for this task which already is waiting for the module
+	 * loading to complete, leading to a deadlock.
+	 */
+	WARN_ON_ONCE(wait && current_is_async());
+
 	va_start(args, fmt);
 	ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
 	va_end(args);
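
Finally, the deadlock that the new WARN_ON_ONCE() flags, drawn out as a timeline (illustrative, following the comment in the hunk above; some_module_init is a hypothetical module init function):

/*
 * async worker thread                 init of the requested module
 * -------------------                 ----------------------------
 * __request_module(true, ...)
 *   waits for module init  ------->   some_module_init()
 *                                       async_synchronize_full()
 *                                         waits for all pending async
 *                                         work, including the worker
 *                                         blocked above: deadlock.
 */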