Diffstat (limited to 'kernel/kmod.c')
-rw-r--r--	kernel/kmod.c	201
1 file changed, 156 insertions(+), 45 deletions(-)
diff --git a/kernel/kmod.c b/kernel/kmod.c
index a0a88543934e..05698a7415fe 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -60,6 +60,43 @@ static DECLARE_RWSEM(umhelper_sem);
  */
 char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
 
+static void free_modprobe_argv(struct subprocess_info *info)
+{
+	kfree(info->argv[3]);	/* check call_modprobe() */
+	kfree(info->argv);
+}
+
+static int call_modprobe(char *module_name, int wait)
+{
+	static char *envp[] = {
+		"HOME=/",
+		"TERM=linux",
+		"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
+		NULL
+	};
+
+	char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
+	if (!argv)
+		goto out;
+
+	module_name = kstrdup(module_name, GFP_KERNEL);
+	if (!module_name)
+		goto free_argv;
+
+	argv[0] = modprobe_path;
+	argv[1] = "-q";
+	argv[2] = "--";
+	argv[3] = module_name;	/* check free_modprobe_argv() */
+	argv[4] = NULL;
+
+	return call_usermodehelper_fns(modprobe_path, argv, envp,
+		wait | UMH_KILLABLE, NULL, free_modprobe_argv, NULL);
+free_argv:
+	kfree(argv);
+out:
+	return -ENOMEM;
+}
+
 /**
  * __request_module - try to load a kernel module
  * @wait: wait (or not) for the operation to complete
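The cleanup callback is the point of the new helper: with UMH_WAIT_EXEC the caller can return before modprobe has run, so argv and the kstrdup()'ed module name must outlive call_modprobe() and are freed from free_modprobe_argv() rather than by the caller. A minimal sketch of the same ownership pattern for a different, purely hypothetical helper (the path and function names below are illustrative, not part of this patch):

static void free_example_argv(struct subprocess_info *info)
{
	kfree(info->argv);			/* allocated in run_example_helper() */
}

static int run_example_helper(int wait)
{
	static char *envp[] = {
		"HOME=/",
		"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
		NULL
	};
	char **argv = kmalloc(sizeof(char *[2]), GFP_KERNEL);

	if (!argv)
		return -ENOMEM;

	argv[0] = "/sbin/example-helper";	/* hypothetical binary */
	argv[1] = NULL;

	/* ownership of argv passes to the helper machinery from here on */
	return call_usermodehelper_fns(argv[0], argv, envp, wait,
					NULL, free_example_argv, NULL);
}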
@@ -81,11 +118,6 @@ int __request_module(bool wait, const char *fmt, ...)
 	char module_name[MODULE_NAME_LEN];
 	unsigned int max_modprobes;
 	int ret;
-	char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
-	static char *envp[] = { "HOME=/",
-				"TERM=linux",
-				"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
-				NULL };
 	static atomic_t kmod_concurrent = ATOMIC_INIT(0);
 #define MAX_KMOD_CONCURRENT 50	/* Completely arbitrary value - KAO */
 	static int kmod_loop_msg;
@@ -128,9 +160,7 @@ int __request_module(bool wait, const char *fmt, ...)
 
 	trace_module_request(module_name, wait, _RET_IP_);
 
-	ret = call_usermodehelper_fns(modprobe_path, argv, envp,
-			wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC,
-			NULL, NULL, NULL);
+	ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
 
 	atomic_dec(&kmod_concurrent);
 	return ret;
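For context, __request_module() is normally reached through the request_module() and request_module_nowait() wrappers in include/linux/kmod.h, which supply the wait argument mapped onto UMH_WAIT_PROC or UMH_WAIT_EXEC above. A typical call site might look like this (illustrative driver code, not from this patch):

static int example_attach(int proto_id)
{
	/*
	 * request_module() expands to __request_module(true, ...) and waits
	 * for modprobe to exit; request_module_nowait() passes false and
	 * only waits for the exec to start.
	 */
	if (request_module("example-proto-%d", proto_id) < 0)
		return -ENODEV;
	return 0;
}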
@@ -188,7 +218,7 @@ static int ____call_usermodehelper(void *data)
 	/* Exec failed? */
 fail:
 	sub_info->retval = retval;
-	do_exit(0);
+	return 0;
 }
 
 void call_usermodehelper_freeinfo(struct subprocess_info *info)
@@ -199,6 +229,19 @@ void call_usermodehelper_freeinfo(struct subprocess_info *info)
 }
 EXPORT_SYMBOL(call_usermodehelper_freeinfo);
 
+static void umh_complete(struct subprocess_info *sub_info)
+{
+	struct completion *comp = xchg(&sub_info->complete, NULL);
+	/*
+	 * See call_usermodehelper_exec(). If xchg() returns NULL
+	 * we own sub_info, the UMH_KILLABLE caller has gone away.
+	 */
+	if (comp)
+		complete(comp);
+	else
+		call_usermodehelper_freeinfo(sub_info);
+}
+
 /* Keventd can't block, but this (a child) can. */
 static int wait_for_helper(void *data)
 {
@@ -235,7 +278,7 @@ static int wait_for_helper(void *data)
 		sub_info->retval = ret;
 	}
 
-	complete(sub_info->complete);
+	umh_complete(sub_info);
 	return 0;
 }
 
@@ -244,7 +287,7 @@ static void __call_usermodehelper(struct work_struct *work)
 {
 	struct subprocess_info *sub_info =
 		container_of(work, struct subprocess_info, work);
-	enum umh_wait wait = sub_info->wait;
+	int wait = sub_info->wait & ~UMH_KILLABLE;
 	pid_t pid;
 
 	/* CLONE_VFORK: wait until the usermode helper has execve'd
@@ -269,7 +312,7 @@ static void __call_usermodehelper(struct work_struct *work)
 	case UMH_WAIT_EXEC:
 		if (pid < 0)
 			sub_info->retval = pid;
-		complete(sub_info->complete);
+		umh_complete(sub_info);
 	}
 }
 
@@ -279,7 +322,7 @@ static void __call_usermodehelper(struct work_struct *work)
  * land has been frozen during a system-wide hibernation or suspend operation).
  * Should always be manipulated under umhelper_sem acquired for write.
  */
-static int usermodehelper_disabled = 1;
+static enum umh_disable_depth usermodehelper_disabled = UMH_DISABLED;
 
 /* Number of helpers running */
 static atomic_t running_helpers = ATOMIC_INIT(0);
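usermodehelper_disabled is no longer a plain flag: enum umh_disable_depth distinguishes full disablement from a transitional state in which readers may block (used by usermodehelper_read_trylock() below). The enum itself is defined in include/linux/kmod.h, not in this file; it is expected to look roughly like this:

enum umh_disable_depth {
	UMH_ENABLED = 0,	/* helpers may be started */
	UMH_FREEZING,		/* going down; readers may wait for the outcome */
	UMH_DISABLED,		/* fully disabled; trylock fails with -EAGAIN */
};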
@@ -291,32 +334,110 @@ static atomic_t running_helpers = ATOMIC_INIT(0);
 static DECLARE_WAIT_QUEUE_HEAD(running_helpers_waitq);
 
 /*
+ * Used by usermodehelper_read_lock_wait() to wait for usermodehelper_disabled
+ * to become 'false'.
+ */
+static DECLARE_WAIT_QUEUE_HEAD(usermodehelper_disabled_waitq);
+
+/*
  * Time to wait for running_helpers to become zero before the setting of
  * usermodehelper_disabled in usermodehelper_disable() fails
  */
 #define RUNNING_HELPERS_TIMEOUT	(5 * HZ)
 
-void read_lock_usermodehelper(void)
+int usermodehelper_read_trylock(void)
+{
+	DEFINE_WAIT(wait);
+	int ret = 0;
+
+	down_read(&umhelper_sem);
+	for (;;) {
+		prepare_to_wait(&usermodehelper_disabled_waitq, &wait,
+				TASK_INTERRUPTIBLE);
+		if (!usermodehelper_disabled)
+			break;
+
+		if (usermodehelper_disabled == UMH_DISABLED)
+			ret = -EAGAIN;
+
+		up_read(&umhelper_sem);
+
+		if (ret)
+			break;
+
+		schedule();
+		try_to_freeze();
+
+		down_read(&umhelper_sem);
+	}
+	finish_wait(&usermodehelper_disabled_waitq, &wait);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(usermodehelper_read_trylock);
+
+long usermodehelper_read_lock_wait(long timeout)
 {
+	DEFINE_WAIT(wait);
+
+	if (timeout < 0)
+		return -EINVAL;
+
 	down_read(&umhelper_sem);
+	for (;;) {
+		prepare_to_wait(&usermodehelper_disabled_waitq, &wait,
+				TASK_UNINTERRUPTIBLE);
+		if (!usermodehelper_disabled)
+			break;
+
+		up_read(&umhelper_sem);
+
+		timeout = schedule_timeout(timeout);
+		if (!timeout)
+			break;
+
+		down_read(&umhelper_sem);
+	}
+	finish_wait(&usermodehelper_disabled_waitq, &wait);
+	return timeout;
 }
-EXPORT_SYMBOL_GPL(read_lock_usermodehelper);
+EXPORT_SYMBOL_GPL(usermodehelper_read_lock_wait);
 
-void read_unlock_usermodehelper(void)
+void usermodehelper_read_unlock(void)
 {
 	up_read(&umhelper_sem);
 }
-EXPORT_SYMBOL_GPL(read_unlock_usermodehelper);
+EXPORT_SYMBOL_GPL(usermodehelper_read_unlock);
 
 /**
- * usermodehelper_disable - prevent new helpers from being started
+ * __usermodehelper_set_disable_depth - Modify usermodehelper_disabled.
+ * @depth: New value to assign to usermodehelper_disabled.
+ *
+ * Change the value of usermodehelper_disabled (under umhelper_sem locked for
+ * writing) and wakeup tasks waiting for it to change.
  */
-int usermodehelper_disable(void)
+void __usermodehelper_set_disable_depth(enum umh_disable_depth depth)
+{
+	down_write(&umhelper_sem);
+	usermodehelper_disabled = depth;
+	wake_up(&usermodehelper_disabled_waitq);
+	up_write(&umhelper_sem);
+}
+
+/**
+ * __usermodehelper_disable - Prevent new helpers from being started.
+ * @depth: New value to assign to usermodehelper_disabled.
+ *
+ * Set usermodehelper_disabled to @depth and wait for running helpers to exit.
+ */
+int __usermodehelper_disable(enum umh_disable_depth depth)
 {
 	long retval;
 
+	if (!depth)
+		return -EINVAL;
+
 	down_write(&umhelper_sem);
-	usermodehelper_disabled = 1;
+	usermodehelper_disabled = depth;
 	up_write(&umhelper_sem);
 
 	/*
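A sketch of how a caller might use the new read-side API around a helper invocation that can race with suspend: usermodehelper_read_trylock() fails with -EAGAIN once helpers are fully disabled, while usermodehelper_read_lock_wait() sleeps (up to a timeout) for a transitional UMH_FREEZING state to resolve. The caller below is hypothetical:

static int example_start_helper(void)
{
	int ret;

	ret = usermodehelper_read_trylock();
	if (ret)				/* -EAGAIN: helpers are disabled */
		return ret;

	ret = request_module("example_mod");	/* hypothetical module name */

	usermodehelper_read_unlock();
	return ret;
}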
@@ -331,31 +452,10 @@ int usermodehelper_disable(void)
 	if (retval)
 		return 0;
 
-	down_write(&umhelper_sem);
-	usermodehelper_disabled = 0;
-	up_write(&umhelper_sem);
+	__usermodehelper_set_disable_depth(UMH_ENABLED);
 	return -EAGAIN;
 }
 
-/**
- * usermodehelper_enable - allow new helpers to be started again
- */
-void usermodehelper_enable(void)
-{
-	down_write(&umhelper_sem);
-	usermodehelper_disabled = 0;
-	up_write(&umhelper_sem);
-}
-
-/**
- * usermodehelper_is_disabled - check if new helpers are allowed to be started
- */
-bool usermodehelper_is_disabled(void)
-{
-	return usermodehelper_disabled;
-}
-EXPORT_SYMBOL_GPL(usermodehelper_is_disabled);
-
 static void helper_lock(void)
 {
 	atomic_inc(&running_helpers);
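The removed usermodehelper_enable() and the old usermodehelper_disable() entry points do not have to disappear from the API: with the depth argument they can be kept as thin wrappers, presumably as static inlines in include/linux/kmod.h, along these lines:

/* sketch of the expected wrappers; not part of this file */
static inline int usermodehelper_disable(void)
{
	return __usermodehelper_disable(UMH_DISABLED);
}

static inline void usermodehelper_enable(void)
{
	__usermodehelper_set_disable_depth(UMH_ENABLED);
}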
@@ -435,8 +535,7 @@ EXPORT_SYMBOL(call_usermodehelper_setfns);
  * asynchronously if wait is not set, and runs as a child of keventd.
  * (ie. it runs with full root capabilities).
  */
-int call_usermodehelper_exec(struct subprocess_info *sub_info,
-			     enum umh_wait wait)
+int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
 {
 	DECLARE_COMPLETION_ONSTACK(done);
 	int retval = 0;
@@ -456,9 +555,21 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info,
 	queue_work(khelper_wq, &sub_info->work);
 	if (wait == UMH_NO_WAIT)	/* task has freed sub_info */
 		goto unlock;
+
+	if (wait & UMH_KILLABLE) {
+		retval = wait_for_completion_killable(&done);
+		if (!retval)
+			goto wait_done;
+
+		/* umh_complete() will see NULL and free sub_info */
+		if (xchg(&sub_info->complete, NULL))
+			goto unlock;
+		/* fallthrough, umh_complete() was already called */
+	}
+
 	wait_for_completion(&done);
+wait_done:
 	retval = sub_info->retval;
-
 out:
 	call_usermodehelper_freeinfo(sub_info);
 unlock:
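The UMH_KILLABLE handshake works because both sides race over sub_info->complete with xchg(): exactly one of them observes the non-NULL completion pointer, and the other is left owning sub_info and must free it. A stand-alone illustration of the same idiom, with generic names rather than the kernel's code paths:

struct handoff {
	struct completion *complete;	/* claimed by whoever xchg()s it to NULL */
	/* ... payload ... */
};

/* producer side, mirrors umh_complete() */
static void handoff_finish(struct handoff *h)
{
	struct completion *comp = xchg(&h->complete, NULL);

	if (comp)
		complete(comp);	/* consumer is still waiting and will free h */
	else
		kfree(h);	/* consumer detached earlier; h is ours to free */
}

/* consumer side, mirrors the UMH_KILLABLE branch above */
static int handoff_wait_killable(struct handoff *h, struct completion *done)
{
	if (!wait_for_completion_killable(done))
		return 0;			/* normal completion; we keep h */

	if (xchg(&h->complete, NULL))
		return -EINTR;			/* detached; producer frees h */

	/* producer already claimed the completion and is signalling it now */
	wait_for_completion(done);
	return 0;
}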