aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorFrederic Weisbecker <fweisbec@gmail.com>2015-09-09 18:38:22 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2015-09-10 16:29:01 -0400
commit90f023030e26ce8f981b3e688cb79329d8d07cc3 (patch)
treec000e9501dabe18902380d25cedaaf6ceecc2a2f
parentb639e86bae431db3fbc9fae8d09a9bbf97b74711 (diff)
kmod: use system_unbound_wq instead of khelper
We need to launch the usermodehelper kernel threads with the widest affinity and this is partly why we use khelper. This workqueue has unbound properties and thus a wide affinity inherited by all its children. Now khelper also has special properties that we aren't much interested in: ordered and singlethread. There is really no need for ordering as all we do is creating kernel threads. This can be done concurrently. And singlethread is a useless limitation as well. The workqueue engine already proposes generic unbound workqueues that don't share these useless properties and handle parallel jobs well. The only worrisome specific is their affinity to the node of the current CPU. It's fine for creating the usermodehelper kernel threads but those inherit this affinity for longer jobs such as requesting modules. This patch proposes to use these node-affine unbound workqueues assuming that a node is sufficient to handle several parallel usermodehelper requests. Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com> Cc: Rik van Riel <riel@redhat.com> Reviewed-by: Oleg Nesterov <oleg@redhat.com> Cc: Christoph Lameter <cl@linux.com> Cc: Tejun Heo <tj@kernel.org> Cc: Rusty Russell <rusty@rustcorp.com.au> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--include/linux/kmod.h2
-rw-r--r--init/main.c1
-rw-r--r--kernel/kmod.c40
3 files changed, 17 insertions, 26 deletions
diff --git a/include/linux/kmod.h b/include/linux/kmod.h
index 0555cc66a15b..fcfd2bf14d3f 100644
--- a/include/linux/kmod.h
+++ b/include/linux/kmod.h
@@ -85,8 +85,6 @@ enum umh_disable_depth {
85 UMH_DISABLED, 85 UMH_DISABLED,
86}; 86};
87 87
88extern void usermodehelper_init(void);
89
90extern int __usermodehelper_disable(enum umh_disable_depth depth); 88extern int __usermodehelper_disable(enum umh_disable_depth depth);
91extern void __usermodehelper_set_disable_depth(enum umh_disable_depth depth); 89extern void __usermodehelper_set_disable_depth(enum umh_disable_depth depth);
92 90
diff --git a/init/main.c b/init/main.c
index 56506553d4d8..9e64d7097f1a 100644
--- a/init/main.c
+++ b/init/main.c
@@ -877,7 +877,6 @@ static void __init do_initcalls(void)
877static void __init do_basic_setup(void) 877static void __init do_basic_setup(void)
878{ 878{
879 cpuset_init_smp(); 879 cpuset_init_smp();
880 usermodehelper_init();
881 shmem_init(); 880 shmem_init();
882 driver_init(); 881 driver_init();
883 init_irq_proc(); 882 init_irq_proc();
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 81c67050c5aa..d38b2dab99a7 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -45,8 +45,6 @@
45 45
46extern int max_threads; 46extern int max_threads;
47 47
48static struct workqueue_struct *khelper_wq;
49
50#define CAP_BSET (void *)1 48#define CAP_BSET (void *)1
51#define CAP_PI (void *)2 49#define CAP_PI (void *)2
52 50
@@ -225,7 +223,7 @@ static int call_usermodehelper_exec_async(void *data)
225 spin_unlock_irq(&current->sighand->siglock); 223 spin_unlock_irq(&current->sighand->siglock);
226 224
227 /* 225 /*
228 * Our parent is khelper which runs with elevated scheduling 226 * Our parent (unbound workqueue) runs with elevated scheduling
229 * priority. Avoid propagating that into the userspace child. 227 * priority. Avoid propagating that into the userspace child.
230 */ 228 */
231 set_user_nice(current, 0); 229 set_user_nice(current, 0);
@@ -268,9 +266,10 @@ out:
268} 266}
269 267
270/* 268/*
271 * Handles UMH_WAIT_PROC. Our parent khelper can't wait for usermodehelper 269 * Handles UMH_WAIT_PROC. Our parent (unbound workqueue) might not be able to
272 * completion without blocking every other pending requests. That's why 270 * run enough instances to handle usermodehelper completions without blocking
273 * we use a kernel thread dedicated for that purpose. 271 * some other pending requests. That's why we use a kernel thread dedicated for
272 * that purpose.
274 */ 273 */
275static int call_usermodehelper_exec_sync(void *data) 274static int call_usermodehelper_exec_sync(void *data)
276{ 275{
@@ -312,14 +311,15 @@ static int call_usermodehelper_exec_sync(void *data)
312/* 311/*
313 * This function doesn't strictly needs to be called asynchronously. But we 312 * This function doesn't strictly needs to be called asynchronously. But we
314 * need to create the usermodehelper kernel threads from a task that is affine 313 * need to create the usermodehelper kernel threads from a task that is affine
315 * to all CPUs (or nohz housekeeping ones) such that they inherit a widest 314 * to an optimized set of CPUs (or nohz housekeeping ones) such that they
316 * affinity irrespective of call_usermodehelper() callers with possibly reduced 315 * inherit a widest affinity irrespective of call_usermodehelper() callers with
317 * affinity (eg: per-cpu workqueues). We don't want usermodehelper targets to 316 * possibly reduced affinity (eg: per-cpu workqueues). We don't want
318 * contend any busy CPU. 317 * usermodehelper targets to contend a busy CPU.
319 * Khelper provides such wide affinity. 318 *
319 * Unbound workqueues provide such wide affinity.
320 * 320 *
321 * Besides, khelper provides the privilege level that caller might not have to 321 * Besides, workqueues provide the privilege level that caller might not have
322 * perform the usermodehelper request. 322 * to perform the usermodehelper request.
323 * 323 *
324 */ 324 */
325static void call_usermodehelper_exec_work(struct work_struct *work) 325static void call_usermodehelper_exec_work(struct work_struct *work)
@@ -549,8 +549,8 @@ EXPORT_SYMBOL(call_usermodehelper_setup);
549 * from interrupt context. 549 * from interrupt context.
550 * 550 *
551 * Runs a user-space application. The application is started 551 * Runs a user-space application. The application is started
552 * asynchronously if wait is not set, and runs as a child of khelper. 552 * asynchronously if wait is not set, and runs as a child of system workqueues.
553 * (ie. it runs with full root capabilities and wide affinity). 553 * (ie. it runs with full root capabilities and optimized affinity).
554 */ 554 */
555int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait) 555int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
556{ 556{
@@ -562,7 +562,7 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
562 return -EINVAL; 562 return -EINVAL;
563 } 563 }
564 helper_lock(); 564 helper_lock();
565 if (!khelper_wq || usermodehelper_disabled) { 565 if (usermodehelper_disabled) {
566 retval = -EBUSY; 566 retval = -EBUSY;
567 goto out; 567 goto out;
568 } 568 }
@@ -574,7 +574,7 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
574 sub_info->complete = (wait == UMH_NO_WAIT) ? NULL : &done; 574 sub_info->complete = (wait == UMH_NO_WAIT) ? NULL : &done;
575 sub_info->wait = wait; 575 sub_info->wait = wait;
576 576
577 queue_work(khelper_wq, &sub_info->work); 577 queue_work(system_unbound_wq, &sub_info->work);
578 if (wait == UMH_NO_WAIT) /* task has freed sub_info */ 578 if (wait == UMH_NO_WAIT) /* task has freed sub_info */
579 goto unlock; 579 goto unlock;
580 580
@@ -704,9 +704,3 @@ struct ctl_table usermodehelper_table[] = {
704 }, 704 },
705 { } 705 { }
706}; 706};
707
708void __init usermodehelper_init(void)
709{
710 khelper_wq = create_singlethread_workqueue("khelper");
711 BUG_ON(!khelper_wq);
712}