about summary refs log tree commit diff stats
path: root/kernel/kmod.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/kmod.c')
-rw-r--r--  kernel/kmod.c  164
1 files changed, 150 insertions, 14 deletions
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 3a7379aa31ca..9f923f8ce6a0 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -36,6 +36,8 @@
36#include <linux/resource.h> 36#include <linux/resource.h>
37#include <asm/uaccess.h> 37#include <asm/uaccess.h>
38 38
39extern int delete_module(const char *name, unsigned int flags);
40
39extern int max_threads; 41extern int max_threads;
40 42
41static struct workqueue_struct *khelper_wq; 43static struct workqueue_struct *khelper_wq;
@@ -46,6 +48,7 @@ static struct workqueue_struct *khelper_wq;
46 modprobe_path is set via /proc/sys. 48 modprobe_path is set via /proc/sys.
47*/ 49*/
48char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe"; 50char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
51struct module_kobject kmod_mk;
49 52
50/** 53/**
51 * request_module - try to load a kernel module 54 * request_module - try to load a kernel module
@@ -75,6 +78,11 @@ int request_module(const char *fmt, ...)
75 static atomic_t kmod_concurrent = ATOMIC_INIT(0); 78 static atomic_t kmod_concurrent = ATOMIC_INIT(0);
76#define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */ 79#define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
77 static int kmod_loop_msg; 80 static int kmod_loop_msg;
81 char modalias[16 + MODULE_NAME_LEN] = "MODALIAS=";
82 char *uevent_envp[2] = {
83 modalias,
84 NULL
85 };
78 86
79 va_start(args, fmt); 87 va_start(args, fmt);
80 ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args); 88 ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
@@ -82,6 +90,12 @@ int request_module(const char *fmt, ...)
82 if (ret >= MODULE_NAME_LEN) 90 if (ret >= MODULE_NAME_LEN)
83 return -ENAMETOOLONG; 91 return -ENAMETOOLONG;
84 92
93 strcpy(&modalias[strlen("MODALIAS=")], module_name);
94 kobject_uevent_env(&kmod_mk.kobj, KOBJ_CHANGE, uevent_envp);
95
96 if (modprobe_path[0] == '\0')
97 goto out;
98
85 /* If modprobe needs a service that is in a module, we get a recursive 99 /* If modprobe needs a service that is in a module, we get a recursive
86 * loop. Limit the number of running kmod threads to max_threads/2 or 100 * loop. Limit the number of running kmod threads to max_threads/2 or
87 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method 101 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
@@ -108,9 +122,115 @@ int request_module(const char *fmt, ...)
108 122
109 ret = call_usermodehelper(modprobe_path, argv, envp, 1); 123 ret = call_usermodehelper(modprobe_path, argv, envp, 1);
110 atomic_dec(&kmod_concurrent); 124 atomic_dec(&kmod_concurrent);
125out:
111 return ret; 126 return ret;
112} 127}
113EXPORT_SYMBOL(request_module); 128EXPORT_SYMBOL(request_module);
129
130static ssize_t store_mod_request(struct module_attribute *mattr,
131 struct module *mod,
132 const char *buffer, size_t count)
133{
134 char name[MODULE_NAME_LEN];
135 int ret;
136
137 if (count < 1 || count+1 > MODULE_NAME_LEN)
138 return -EINVAL;
139 memcpy(name, buffer, count);
140 name[count] = '\0';
141 if (name[count-1] == '\n')
142 name[count-1] = '\0';
143
144 ret = request_module(name);
145 if (ret < 0)
146 return ret;
147 return count;
148}
149
150static struct module_attribute mod_request = {
151 .attr = { .name = "mod_request", .mode = S_IWUSR, .owner = THIS_MODULE },
152 .store = store_mod_request,
153};
154
155#ifdef CONFIG_MODULE_UNLOAD
156static ssize_t store_mod_unload(struct module_attribute *mattr,
157 struct module *mod,
158 const char *buffer, size_t count)
159{
160 char name[MODULE_NAME_LEN];
161 int ret;
162
163 if (count < 1 || count+1 > MODULE_NAME_LEN)
164 return -EINVAL;
165 memcpy(name, buffer, count);
166 name[count] = '\0';
167 if (name[count-1] == '\n')
168 name[count-1] = '\0';
169
170 ret = delete_module(name, O_NONBLOCK);
171 if (ret < 0)
172 return ret;
173 return count;
174}
175
176static struct module_attribute mod_unload = {
177 .attr = { .name = "mod_unload", .mode = S_IWUSR, .owner = THIS_MODULE },
178 .store = store_mod_unload,
179};
180#endif
181
182static ssize_t show_mod_request_helper(struct module_attribute *mattr,
183 struct module *mod,
184 char *buffer)
185{
186 return sprintf(buffer, "%s\n", modprobe_path);
187}
188
189static ssize_t store_mod_request_helper(struct module_attribute *mattr,
190 struct module *mod,
191 const char *buffer, size_t count)
192{
193 if (count < 1 || count+1 > KMOD_PATH_LEN)
194 return -EINVAL;
195 memcpy(modprobe_path, buffer, count);
196 modprobe_path[count] = '\0';
197 if (modprobe_path[count-1] == '\n')
198 modprobe_path[count-1] = '\0';
199 return count;
200}
201
202static struct module_attribute mod_request_helper = {
203 .attr = {
204 .name = "mod_request_helper",
205 .mode = S_IWUSR | S_IRUGO,
206 .owner = THIS_MODULE
207 },
208 .show = show_mod_request_helper,
209 .store = store_mod_request_helper,
210};
211
212void __init kmod_sysfs_init(void)
213{
214 int ret;
215
216 kmod_mk.mod = THIS_MODULE;
217 kobj_set_kset_s(&kmod_mk, module_subsys);
218 kobject_set_name(&kmod_mk.kobj, "kmod");
219 kobject_init(&kmod_mk.kobj);
220 ret = kobject_add(&kmod_mk.kobj);
221 if (ret < 0)
222 goto out;
223
224 ret = sysfs_create_file(&kmod_mk.kobj, &mod_request_helper.attr);
225 ret = sysfs_create_file(&kmod_mk.kobj, &mod_request.attr);
226#ifdef CONFIG_MODULE_UNLOAD
227 ret = sysfs_create_file(&kmod_mk.kobj, &mod_unload.attr);
228#endif
229
230 kobject_uevent(&kmod_mk.kobj, KOBJ_ADD);
231out:
232 return;
233}
114#endif /* CONFIG_KMOD */ 234#endif /* CONFIG_KMOD */
115 235
116struct subprocess_info { 236struct subprocess_info {
@@ -217,7 +337,10 @@ static int wait_for_helper(void *data)
217 sub_info->retval = ret; 337 sub_info->retval = ret;
218 } 338 }
219 339
220 complete(sub_info->complete); 340 if (sub_info->wait < 0)
341 kfree(sub_info);
342 else
343 complete(sub_info->complete);
221 return 0; 344 return 0;
222} 345}
223 346
@@ -239,6 +362,9 @@ static void __call_usermodehelper(struct work_struct *work)
239 pid = kernel_thread(____call_usermodehelper, sub_info, 362 pid = kernel_thread(____call_usermodehelper, sub_info,
240 CLONE_VFORK | SIGCHLD); 363 CLONE_VFORK | SIGCHLD);
241 364
365 if (wait < 0)
366 return;
367
242 if (pid < 0) { 368 if (pid < 0) {
243 sub_info->retval = pid; 369 sub_info->retval = pid;
244 complete(sub_info->complete); 370 complete(sub_info->complete);
@@ -253,6 +379,9 @@ static void __call_usermodehelper(struct work_struct *work)
253 * @envp: null-terminated environment list 379 * @envp: null-terminated environment list
254 * @session_keyring: session keyring for process (NULL for an empty keyring) 380 * @session_keyring: session keyring for process (NULL for an empty keyring)
255 * @wait: wait for the application to finish and return status. 381 * @wait: wait for the application to finish and return status.
382 * when -1 don't wait at all, but you get no useful error back when
383 * the program couldn't be exec'ed. This makes it safe to call
384 * from interrupt context.
256 * 385 *
257 * Runs a user-space application. The application is started 386 * Runs a user-space application. The application is started
258 * asynchronously if wait is not set, and runs as a child of keventd. 387 * asynchronously if wait is not set, and runs as a child of keventd.
@@ -265,17 +394,8 @@ int call_usermodehelper_keys(char *path, char **argv, char **envp,
265 struct key *session_keyring, int wait) 394 struct key *session_keyring, int wait)
266{ 395{
267 DECLARE_COMPLETION_ONSTACK(done); 396 DECLARE_COMPLETION_ONSTACK(done);
268 struct subprocess_info sub_info = { 397 struct subprocess_info *sub_info;
269 .work = __WORK_INITIALIZER(sub_info.work, 398 int retval;
270 __call_usermodehelper),
271 .complete = &done,
272 .path = path,
273 .argv = argv,
274 .envp = envp,
275 .ring = session_keyring,
276 .wait = wait,
277 .retval = 0,
278 };
279 399
280 if (!khelper_wq) 400 if (!khelper_wq)
281 return -EBUSY; 401 return -EBUSY;
@@ -283,9 +403,25 @@ int call_usermodehelper_keys(char *path, char **argv, char **envp,
283 if (path[0] == '\0') 403 if (path[0] == '\0')
284 return 0; 404 return 0;
285 405
286 queue_work(khelper_wq, &sub_info.work); 406 sub_info = kzalloc(sizeof(struct subprocess_info), GFP_ATOMIC);
407 if (!sub_info)
408 return -ENOMEM;
409
410 INIT_WORK(&sub_info->work, __call_usermodehelper);
411 sub_info->complete = &done;
412 sub_info->path = path;
413 sub_info->argv = argv;
414 sub_info->envp = envp;
415 sub_info->ring = session_keyring;
416 sub_info->wait = wait;
417
418 queue_work(khelper_wq, &sub_info->work);
419 if (wait < 0) /* task has freed sub_info */
420 return 0;
287 wait_for_completion(&done); 421 wait_for_completion(&done);
288 return sub_info.retval; 422 retval = sub_info->retval;
423 kfree(sub_info);
424 return retval;
289} 425}
290EXPORT_SYMBOL(call_usermodehelper_keys); 426EXPORT_SYMBOL(call_usermodehelper_keys);
291 427