Diffstat (limited to 'kernel/umh.c')
-rw-r--r-- | kernel/umh.c | 568
1 files changed, 568 insertions, 0 deletions
diff --git a/kernel/umh.c b/kernel/umh.c
new file mode 100644
index 000000000000..6ff9905250ff
--- /dev/null
+++ b/kernel/umh.c
@@ -0,0 +1,568 @@
1 | /* | ||
2 | * umh - the kernel usermode helper | ||
3 | */ | ||
4 | #include <linux/module.h> | ||
5 | #include <linux/sched.h> | ||
6 | #include <linux/sched/task.h> | ||
7 | #include <linux/binfmts.h> | ||
8 | #include <linux/syscalls.h> | ||
9 | #include <linux/unistd.h> | ||
10 | #include <linux/kmod.h> | ||
11 | #include <linux/slab.h> | ||
12 | #include <linux/completion.h> | ||
13 | #include <linux/cred.h> | ||
14 | #include <linux/file.h> | ||
15 | #include <linux/fdtable.h> | ||
16 | #include <linux/workqueue.h> | ||
17 | #include <linux/security.h> | ||
18 | #include <linux/mount.h> | ||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/init.h> | ||
21 | #include <linux/resource.h> | ||
22 | #include <linux/notifier.h> | ||
23 | #include <linux/suspend.h> | ||
24 | #include <linux/rwsem.h> | ||
25 | #include <linux/ptrace.h> | ||
26 | #include <linux/async.h> | ||
27 | #include <linux/uaccess.h> | ||
28 | |||
29 | #include <trace/events/module.h> | ||
30 | |||
31 | #define CAP_BSET (void *)1 | ||
32 | #define CAP_PI (void *)2 | ||
33 | |||
34 | static kernel_cap_t usermodehelper_bset = CAP_FULL_SET; | ||
35 | static kernel_cap_t usermodehelper_inheritable = CAP_FULL_SET; | ||
36 | static DEFINE_SPINLOCK(umh_sysctl_lock); | ||
37 | static DECLARE_RWSEM(umhelper_sem); | ||
38 | |||
39 | static void call_usermodehelper_freeinfo(struct subprocess_info *info) | ||
40 | { | ||
41 | if (info->cleanup) | ||
42 | (*info->cleanup)(info); | ||
43 | kfree(info); | ||
44 | } | ||
45 | |||
46 | static void umh_complete(struct subprocess_info *sub_info) | ||
47 | { | ||
48 | struct completion *comp = xchg(&sub_info->complete, NULL); | ||
49 | /* | ||
50 | * See call_usermodehelper_exec(). If xchg() returns NULL | ||
51 | * we own sub_info, the UMH_KILLABLE caller has gone away | ||
52 | * or the caller used UMH_NO_WAIT. | ||
53 | */ | ||
54 | if (comp) | ||
55 | complete(comp); | ||
56 | else | ||
57 | call_usermodehelper_freeinfo(sub_info); | ||
58 | } | ||
59 | |||
60 | /* | ||
61 | * This is the task which runs the usermode application | ||
62 | */ | ||
63 | static int call_usermodehelper_exec_async(void *data) | ||
64 | { | ||
65 | struct subprocess_info *sub_info = data; | ||
66 | struct cred *new; | ||
67 | int retval; | ||
68 | |||
69 | spin_lock_irq(&current->sighand->siglock); | ||
70 | flush_signal_handlers(current, 1); | ||
71 | spin_unlock_irq(&current->sighand->siglock); | ||
72 | |||
73 | /* | ||
74 | * Our parent (unbound workqueue) runs with elevated scheduling | ||
75 | * priority. Avoid propagating that into the userspace child. | ||
76 | */ | ||
77 | set_user_nice(current, 0); | ||
78 | |||
79 | retval = -ENOMEM; | ||
80 | new = prepare_kernel_cred(current); | ||
81 | if (!new) | ||
82 | goto out; | ||
83 | |||
84 | spin_lock(&umh_sysctl_lock); | ||
85 | new->cap_bset = cap_intersect(usermodehelper_bset, new->cap_bset); | ||
86 | new->cap_inheritable = cap_intersect(usermodehelper_inheritable, | ||
87 | new->cap_inheritable); | ||
88 | spin_unlock(&umh_sysctl_lock); | ||
89 | |||
90 | if (sub_info->init) { | ||
91 | retval = sub_info->init(sub_info, new); | ||
92 | if (retval) { | ||
93 | abort_creds(new); | ||
94 | goto out; | ||
95 | } | ||
96 | } | ||
97 | |||
98 | commit_creds(new); | ||
99 | |||
100 | retval = do_execve(getname_kernel(sub_info->path), | ||
101 | (const char __user *const __user *)sub_info->argv, | ||
102 | (const char __user *const __user *)sub_info->envp); | ||
103 | out: | ||
104 | sub_info->retval = retval; | ||
105 | /* | ||
106 | * call_usermodehelper_exec_sync() will call umh_complete | ||
107 | * if UMH_WAIT_PROC. | ||
108 | */ | ||
109 | if (!(sub_info->wait & UMH_WAIT_PROC)) | ||
110 | umh_complete(sub_info); | ||
111 | if (!retval) | ||
112 | return 0; | ||
113 | do_exit(0); | ||
114 | } | ||
115 | |||
116 | /* Handles UMH_WAIT_PROC. */ | ||
117 | static void call_usermodehelper_exec_sync(struct subprocess_info *sub_info) | ||
118 | { | ||
119 | pid_t pid; | ||
120 | |||
121 | /* If SIGCLD is ignored sys_wait4 won't populate the status. */ | ||
122 | kernel_sigaction(SIGCHLD, SIG_DFL); | ||
123 | pid = kernel_thread(call_usermodehelper_exec_async, sub_info, SIGCHLD); | ||
124 | if (pid < 0) { | ||
125 | sub_info->retval = pid; | ||
126 | } else { | ||
127 | int ret = -ECHILD; | ||
128 | /* | ||
129 | * Normally it is bogus to call wait4() from in-kernel because | ||
130 | * wait4() wants to write the exit code to a userspace address. | ||
131 | * But call_usermodehelper_exec_sync() always runs as kernel | ||
132 | * thread (workqueue) and put_user() to a kernel address works | ||
133 | * OK for kernel threads, due to their having an mm_segment_t | ||
134 | * which spans the entire address space. | ||
135 | * | ||
136 | * Thus the __user pointer cast is valid here. | ||
137 | */ | ||
138 | sys_wait4(pid, (int __user *)&ret, 0, NULL); | ||
139 | |||
140 | /* | ||
141 | * If ret is 0, either call_usermodehelper_exec_async failed and | ||
142 | * the real error code is already in sub_info->retval or | ||
143 | * sub_info->retval is 0 anyway, so don't mess with it then. | ||
144 | */ | ||
145 | if (ret) | ||
146 | sub_info->retval = ret; | ||
147 | } | ||
148 | |||
149 | /* Restore default kernel sig handler */ | ||
150 | kernel_sigaction(SIGCHLD, SIG_IGN); | ||
151 | |||
152 | umh_complete(sub_info); | ||
153 | } | ||
154 | |||
155 | /* | ||
156 | * We need to create the usermodehelper kernel thread from a task that is | ||
157 | * affine to an optimized set of CPUs (or nohz housekeeping ones) so that | ||
158 | * it inherits the widest possible affinity, irrespective of | ||
159 | * call_usermodehelper() callers with possibly reduced affinity | ||
160 | * (eg: per-cpu workqueues). We don't want usermodehelper targets to | ||
161 | * contend for a busy CPU. | ||
162 | * | ||
163 | * Unbound workqueues provide such wide affinity and allow blocking on | ||
164 | * UMH_WAIT_PROC requests without blocking pending requests (up to some limit). | ||
165 | * | ||
166 | * Besides, workqueues provide the privilege level that the caller might | ||
167 | * not have to perform the usermodehelper request. | ||
168 | */ | ||
169 | static void call_usermodehelper_exec_work(struct work_struct *work) | ||
170 | { | ||
171 | struct subprocess_info *sub_info = | ||
172 | container_of(work, struct subprocess_info, work); | ||
173 | |||
174 | if (sub_info->wait & UMH_WAIT_PROC) { | ||
175 | call_usermodehelper_exec_sync(sub_info); | ||
176 | } else { | ||
177 | pid_t pid; | ||
178 | /* | ||
179 | * Use CLONE_PARENT to reparent it to kthreadd; we do not | ||
180 | * want to pollute current->children, and we need a parent | ||
181 | * that always ignores SIGCHLD to ensure auto-reaping. | ||
182 | */ | ||
183 | pid = kernel_thread(call_usermodehelper_exec_async, sub_info, | ||
184 | CLONE_PARENT | SIGCHLD); | ||
185 | if (pid < 0) { | ||
186 | sub_info->retval = pid; | ||
187 | umh_complete(sub_info); | ||
188 | } | ||
189 | } | ||
190 | } | ||
191 | |||
192 | /* | ||
193 | * If set, call_usermodehelper_exec() will exit immediately returning -EBUSY | ||
194 | * (used for preventing user land processes from being created after the user | ||
195 | * land has been frozen during a system-wide hibernation or suspend operation). | ||
196 | * Should always be manipulated under umhelper_sem acquired for write. | ||
197 | */ | ||
198 | static enum umh_disable_depth usermodehelper_disabled = UMH_DISABLED; | ||
199 | |||
200 | /* Number of helpers running */ | ||
201 | static atomic_t running_helpers = ATOMIC_INIT(0); | ||
202 | |||
203 | /* | ||
204 | * Wait queue head used by usermodehelper_disable() to wait for all running | ||
205 | * helpers to finish. | ||
206 | */ | ||
207 | static DECLARE_WAIT_QUEUE_HEAD(running_helpers_waitq); | ||
208 | |||
209 | /* | ||
210 | * Used by usermodehelper_read_lock_wait() to wait for usermodehelper_disabled | ||
211 | * to become 'false'. | ||
212 | */ | ||
213 | static DECLARE_WAIT_QUEUE_HEAD(usermodehelper_disabled_waitq); | ||
214 | |||
215 | /* | ||
216 | * Time to wait for running_helpers to become zero before the setting of | ||
217 | * usermodehelper_disabled in usermodehelper_disable() fails | ||
218 | */ | ||
219 | #define RUNNING_HELPERS_TIMEOUT (5 * HZ) | ||
220 | |||
221 | int usermodehelper_read_trylock(void) | ||
222 | { | ||
223 | DEFINE_WAIT(wait); | ||
224 | int ret = 0; | ||
225 | |||
226 | down_read(&umhelper_sem); | ||
227 | for (;;) { | ||
228 | prepare_to_wait(&usermodehelper_disabled_waitq, &wait, | ||
229 | TASK_INTERRUPTIBLE); | ||
230 | if (!usermodehelper_disabled) | ||
231 | break; | ||
232 | |||
233 | if (usermodehelper_disabled == UMH_DISABLED) | ||
234 | ret = -EAGAIN; | ||
235 | |||
236 | up_read(&umhelper_sem); | ||
237 | |||
238 | if (ret) | ||
239 | break; | ||
240 | |||
241 | schedule(); | ||
242 | try_to_freeze(); | ||
243 | |||
244 | down_read(&umhelper_sem); | ||
245 | } | ||
246 | finish_wait(&usermodehelper_disabled_waitq, &wait); | ||
247 | return ret; | ||
248 | } | ||
249 | EXPORT_SYMBOL_GPL(usermodehelper_read_trylock); | ||
250 | |||
251 | long usermodehelper_read_lock_wait(long timeout) | ||
252 | { | ||
253 | DEFINE_WAIT(wait); | ||
254 | |||
255 | if (timeout < 0) | ||
256 | return -EINVAL; | ||
257 | |||
258 | down_read(&umhelper_sem); | ||
259 | for (;;) { | ||
260 | prepare_to_wait(&usermodehelper_disabled_waitq, &wait, | ||
261 | TASK_UNINTERRUPTIBLE); | ||
262 | if (!usermodehelper_disabled) | ||
263 | break; | ||
264 | |||
265 | up_read(&umhelper_sem); | ||
266 | |||
267 | timeout = schedule_timeout(timeout); | ||
268 | if (!timeout) | ||
269 | break; | ||
270 | |||
271 | down_read(&umhelper_sem); | ||
272 | } | ||
273 | finish_wait(&usermodehelper_disabled_waitq, &wait); | ||
274 | return timeout; | ||
275 | } | ||
276 | EXPORT_SYMBOL_GPL(usermodehelper_read_lock_wait); | ||
277 | |||
278 | void usermodehelper_read_unlock(void) | ||
279 | { | ||
280 | up_read(&umhelper_sem); | ||
281 | } | ||
282 | EXPORT_SYMBOL_GPL(usermodehelper_read_unlock); | ||
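The three functions above form the read side of the disable mechanism: code about to launch a helper takes the read lock (or waits for it) so it cannot race with suspend-time disabling. Below is a minimal sketch of that bracket; example_guarded_invocation() and its argv/envp parameters are hypothetical, only usermodehelper_read_trylock(), usermodehelper_read_unlock() and call_usermodehelper() are the real API used here.

/*
 * Hypothetical sketch (not part of this file): bracket a helper launch with
 * the read lock so it backs off cleanly while helpers are disabled.
 */
static int example_guarded_invocation(char **argv, char **envp)
{
	int ret;

	ret = usermodehelper_read_trylock();
	if (ret)
		return ret;	/* -EAGAIN: helpers are disabled (e.g. suspend) */

	ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);

	usermodehelper_read_unlock();
	return ret;
}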
283 | |||
284 | /** | ||
285 | * __usermodehelper_set_disable_depth - Modify usermodehelper_disabled. | ||
286 | * @depth: New value to assign to usermodehelper_disabled. | ||
287 | * | ||
288 | * Change the value of usermodehelper_disabled (under umhelper_sem locked for | ||
289 | * writing) and wake up tasks waiting for it to change. | ||
290 | */ | ||
291 | void __usermodehelper_set_disable_depth(enum umh_disable_depth depth) | ||
292 | { | ||
293 | down_write(&umhelper_sem); | ||
294 | usermodehelper_disabled = depth; | ||
295 | wake_up(&usermodehelper_disabled_waitq); | ||
296 | up_write(&umhelper_sem); | ||
297 | } | ||
298 | |||
299 | /** | ||
300 | * __usermodehelper_disable - Prevent new helpers from being started. | ||
301 | * @depth: New value to assign to usermodehelper_disabled. | ||
302 | * | ||
303 | * Set usermodehelper_disabled to @depth and wait for running helpers to exit. | ||
304 | */ | ||
305 | int __usermodehelper_disable(enum umh_disable_depth depth) | ||
306 | { | ||
307 | long retval; | ||
308 | |||
309 | if (!depth) | ||
310 | return -EINVAL; | ||
311 | |||
312 | down_write(&umhelper_sem); | ||
313 | usermodehelper_disabled = depth; | ||
314 | up_write(&umhelper_sem); | ||
315 | |||
316 | /* | ||
317 | * From now on call_usermodehelper_exec() won't start any new | ||
318 | * helpers, so it is sufficient if running_helpers turns out to | ||
319 | * be zero at one point (it may be increased later, but that | ||
320 | * doesn't matter). | ||
321 | */ | ||
322 | retval = wait_event_timeout(running_helpers_waitq, | ||
323 | atomic_read(&running_helpers) == 0, | ||
324 | RUNNING_HELPERS_TIMEOUT); | ||
325 | if (retval) | ||
326 | return 0; | ||
327 | |||
328 | __usermodehelper_set_disable_depth(UMH_ENABLED); | ||
329 | return -EAGAIN; | ||
330 | } | ||
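__usermodehelper_disable() and __usermodehelper_set_disable_depth() are meant to be driven by the suspend/hibernation core around freezing user space. A hedged sketch of that sequence follows; example_freeze_userspace() is hypothetical and the real wiring lives outside this file, but UMH_FREEZING and UMH_DISABLED are the actual enum umh_disable_depth values.

/*
 * Hypothetical sketch (not part of this file): how the PM core is expected
 * to drive the disable interface around freezing user space.
 */
static int example_freeze_userspace(void)
{
	int error;

	/* Refuse new helpers and wait for the running ones to drain. */
	error = __usermodehelper_disable(UMH_FREEZING);
	if (error)
		return error;

	/* ... freeze user space tasks here ... */

	/* Once everything is frozen, helpers stay fully disabled. */
	__usermodehelper_set_disable_depth(UMH_DISABLED);
	return 0;
}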
331 | |||
332 | static void helper_lock(void) | ||
333 | { | ||
334 | atomic_inc(&running_helpers); | ||
335 | smp_mb__after_atomic(); | ||
336 | } | ||
337 | |||
338 | static void helper_unlock(void) | ||
339 | { | ||
340 | if (atomic_dec_and_test(&running_helpers)) | ||
341 | wake_up(&running_helpers_waitq); | ||
342 | } | ||
343 | |||
344 | /** | ||
345 | * call_usermodehelper_setup - prepare to call a usermode helper | ||
346 | * @path: path to usermode executable | ||
347 | * @argv: arg vector for process | ||
348 | * @envp: environment for process | ||
349 | * @gfp_mask: gfp mask for memory allocation | ||
350 | * @cleanup: a cleanup function | ||
351 | * @init: an init function | ||
352 | * @data: arbitrary context sensitive data | ||
353 | * | ||
354 | * Returns either %NULL on allocation failure, or a subprocess_info | ||
355 | * structure. This should be passed to call_usermodehelper_exec to | ||
356 | * exec the process and free the structure. | ||
357 | * | ||
358 | * The init function is used to customize the helper process prior to | ||
359 | * exec. A non-zero return code causes the process to error out, exit, | ||
360 | * and return the failure to the calling process. | ||
361 | * | ||
362 | * The cleanup function is called just before the subprocess_info is | ||
363 | * about to be freed. This can be used for freeing the argv and envp. | ||
364 | * The function must be runnable in either a process context or the | ||
365 | * context in which call_usermodehelper_exec is called. | ||
366 | */ | ||
367 | struct subprocess_info *call_usermodehelper_setup(const char *path, char **argv, | ||
368 | char **envp, gfp_t gfp_mask, | ||
369 | int (*init)(struct subprocess_info *info, struct cred *new), | ||
370 | void (*cleanup)(struct subprocess_info *info), | ||
371 | void *data) | ||
372 | { | ||
373 | struct subprocess_info *sub_info; | ||
374 | sub_info = kzalloc(sizeof(struct subprocess_info), gfp_mask); | ||
375 | if (!sub_info) | ||
376 | goto out; | ||
377 | |||
378 | INIT_WORK(&sub_info->work, call_usermodehelper_exec_work); | ||
379 | |||
380 | #ifdef CONFIG_STATIC_USERMODEHELPER | ||
381 | sub_info->path = CONFIG_STATIC_USERMODEHELPER_PATH; | ||
382 | #else | ||
383 | sub_info->path = path; | ||
384 | #endif | ||
385 | sub_info->argv = argv; | ||
386 | sub_info->envp = envp; | ||
387 | |||
388 | sub_info->cleanup = cleanup; | ||
389 | sub_info->init = init; | ||
390 | sub_info->data = data; | ||
391 | out: | ||
392 | return sub_info; | ||
393 | } | ||
394 | EXPORT_SYMBOL(call_usermodehelper_setup); | ||
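For reference, a minimal sketch of the two-step pattern the kerneldoc above describes; the helper path, arguments and example_run_helper() itself are illustrative, not part of this file.

/*
 * Hypothetical sketch (not part of this file): prepare a helper with
 * call_usermodehelper_setup() and run it synchronously.
 */
static int example_run_helper(void)
{
	struct subprocess_info *info;
	static char *argv[] = { "/sbin/example-helper", "--oneshot", NULL };
	static char *envp[] = { "HOME=/",
				"PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };

	info = call_usermodehelper_setup(argv[0], argv, envp, GFP_KERNEL,
					 NULL, NULL, NULL);
	if (!info)
		return -ENOMEM;

	/* Blocks until the helper exits; the return value is its wait status. */
	return call_usermodehelper_exec(info, UMH_WAIT_PROC);
}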
395 | |||
396 | /** | ||
397 | * call_usermodehelper_exec - start a usermode application | ||
398 | * @sub_info: information about the subprocess | ||
399 | * @wait: wait for the application to finish and return status. | ||
400 | * With UMH_NO_WAIT, don't wait at all, but no useful error is | ||
401 | * returned if the program couldn't be exec'd. This makes it safe | ||
402 | * to call from interrupt context. | ||
403 | * | ||
404 | * Runs a user-space application. The application is started | ||
405 | * asynchronously if wait is not set, and runs as a child of system | ||
406 | * workqueues (i.e. it runs with full root capabilities and optimized affinity). | ||
407 | */ | ||
408 | int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait) | ||
409 | { | ||
410 | DECLARE_COMPLETION_ONSTACK(done); | ||
411 | int retval = 0; | ||
412 | |||
413 | if (!sub_info->path) { | ||
414 | call_usermodehelper_freeinfo(sub_info); | ||
415 | return -EINVAL; | ||
416 | } | ||
417 | helper_lock(); | ||
418 | if (usermodehelper_disabled) { | ||
419 | retval = -EBUSY; | ||
420 | goto out; | ||
421 | } | ||
422 | |||
423 | /* | ||
424 | * If there is no binary for us to call, then just return and get out of | ||
425 | * here. This allows us to set STATIC_USERMODEHELPER_PATH to "" and | ||
426 | * disable all call_usermodehelper() calls. | ||
427 | */ | ||
428 | if (strlen(sub_info->path) == 0) | ||
429 | goto out; | ||
430 | |||
431 | /* | ||
432 | * Set the completion pointer only if there is a waiter. | ||
433 | * This makes it possible to use umh_complete to free | ||
434 | * the data structure in case of UMH_NO_WAIT. | ||
435 | */ | ||
436 | sub_info->complete = (wait == UMH_NO_WAIT) ? NULL : &done; | ||
437 | sub_info->wait = wait; | ||
438 | |||
439 | queue_work(system_unbound_wq, &sub_info->work); | ||
440 | if (wait == UMH_NO_WAIT) /* task has freed sub_info */ | ||
441 | goto unlock; | ||
442 | |||
443 | if (wait & UMH_KILLABLE) { | ||
444 | retval = wait_for_completion_killable(&done); | ||
445 | if (!retval) | ||
446 | goto wait_done; | ||
447 | |||
448 | /* umh_complete() will see NULL and free sub_info */ | ||
449 | if (xchg(&sub_info->complete, NULL)) | ||
450 | goto unlock; | ||
451 | /* fallthrough, umh_complete() was already called */ | ||
452 | } | ||
453 | |||
454 | wait_for_completion(&done); | ||
455 | wait_done: | ||
456 | retval = sub_info->retval; | ||
457 | out: | ||
458 | call_usermodehelper_freeinfo(sub_info); | ||
459 | unlock: | ||
460 | helper_unlock(); | ||
461 | return retval; | ||
462 | } | ||
463 | EXPORT_SYMBOL(call_usermodehelper_exec); | ||
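As the kerneldoc notes, UMH_NO_WAIT makes the call safe from interrupt context, at the price of getting no error back; in that mode a cleanup callback is the only place to free caller-allocated argv/envp. A hedged sketch of that pattern follows; example_cleanup(), example_fire_and_forget() and the helper path are hypothetical.

/*
 * Hypothetical sketch (not part of this file): fire-and-forget invocation
 * from a context that cannot sleep, with a cleanup callback that frees the
 * caller-allocated argv once the subprocess_info is released.
 */
static void example_cleanup(struct subprocess_info *info)
{
	kfree(info->argv);
}

static int example_fire_and_forget(void)
{
	struct subprocess_info *info;
	static char *envp[] = { "HOME=/", "PATH=/sbin:/bin", NULL };
	char **argv;

	argv = kmalloc_array(3, sizeof(*argv), GFP_ATOMIC);
	if (!argv)
		return -ENOMEM;
	argv[0] = "/sbin/example-helper";
	argv[1] = "--oneshot";
	argv[2] = NULL;

	info = call_usermodehelper_setup(argv[0], argv, envp, GFP_ATOMIC,
					 NULL, example_cleanup, NULL);
	if (!info) {
		kfree(argv);
		return -ENOMEM;
	}

	/* No waiting, no status: the cleanup callback frees argv later. */
	return call_usermodehelper_exec(info, UMH_NO_WAIT);
}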
464 | |||
465 | /** | ||
466 | * call_usermodehelper() - prepare and start a usermode application | ||
467 | * @path: path to usermode executable | ||
468 | * @argv: arg vector for process | ||
469 | * @envp: environment for process | ||
470 | * @wait: wait for the application to finish and return status. | ||
471 | * With UMH_NO_WAIT, don't wait at all, but no useful error is | ||
472 | * returned if the program couldn't be exec'd. This makes it safe | ||
473 | * to call from interrupt context. | ||
474 | * | ||
475 | * This function is equivalent to calling call_usermodehelper_setup() | ||
476 | * followed by call_usermodehelper_exec(). | ||
477 | */ | ||
478 | int call_usermodehelper(const char *path, char **argv, char **envp, int wait) | ||
479 | { | ||
480 | struct subprocess_info *info; | ||
481 | gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL; | ||
482 | |||
483 | info = call_usermodehelper_setup(path, argv, envp, gfp_mask, | ||
484 | NULL, NULL, NULL); | ||
485 | if (info == NULL) | ||
486 | return -ENOMEM; | ||
487 | |||
488 | return call_usermodehelper_exec(info, wait); | ||
489 | } | ||
490 | EXPORT_SYMBOL(call_usermodehelper); | ||
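A minimal sketch of the convenience form, waiting only until the exec() itself has succeeded or failed (UMH_WAIT_EXEC); the path, arguments and example_notify_userspace() are illustrative only.

/*
 * Hypothetical sketch (not part of this file): the one-call convenience form.
 */
static int example_notify_userspace(void)
{
	static char *argv[] = { "/sbin/example-notify", "event", NULL };
	static char *envp[] = { "HOME=/", "PATH=/sbin:/bin", NULL };

	/* UMH_WAIT_EXEC: wait only until the exec() has succeeded or failed. */
	return call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
}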
491 | |||
492 | static int proc_cap_handler(struct ctl_table *table, int write, | ||
493 | void __user *buffer, size_t *lenp, loff_t *ppos) | ||
494 | { | ||
495 | struct ctl_table t; | ||
496 | unsigned long cap_array[_KERNEL_CAPABILITY_U32S]; | ||
497 | kernel_cap_t new_cap; | ||
498 | int err, i; | ||
499 | |||
500 | if (write && (!capable(CAP_SETPCAP) || | ||
501 | !capable(CAP_SYS_MODULE))) | ||
502 | return -EPERM; | ||
503 | |||
504 | /* | ||
505 | * convert from the global kernel_cap_t to the ulong array to print to | ||
506 | * userspace if this is a read. | ||
507 | */ | ||
508 | spin_lock(&umh_sysctl_lock); | ||
509 | for (i = 0; i < _KERNEL_CAPABILITY_U32S; i++) { | ||
510 | if (table->data == CAP_BSET) | ||
511 | cap_array[i] = usermodehelper_bset.cap[i]; | ||
512 | else if (table->data == CAP_PI) | ||
513 | cap_array[i] = usermodehelper_inheritable.cap[i]; | ||
514 | else | ||
515 | BUG(); | ||
516 | } | ||
517 | spin_unlock(&umh_sysctl_lock); | ||
518 | |||
519 | t = *table; | ||
520 | t.data = &cap_array; | ||
521 | |||
522 | /* | ||
523 | * actually read or write an array of ulongs from userspace. Remember | ||
524 | * these are least significant 32 bits first. | ||
525 | */ | ||
526 | err = proc_doulongvec_minmax(&t, write, buffer, lenp, ppos); | ||
527 | if (err < 0) | ||
528 | return err; | ||
529 | |||
530 | /* | ||
531 | * convert from the sysctl array of ulongs to the kernel_cap_t | ||
532 | * internal representation | ||
533 | */ | ||
534 | for (i = 0; i < _KERNEL_CAPABILITY_U32S; i++) | ||
535 | new_cap.cap[i] = cap_array[i]; | ||
536 | |||
537 | /* | ||
538 | * Drop everything not in the new_cap (but don't add things) | ||
539 | */ | ||
540 | spin_lock(&umh_sysctl_lock); | ||
541 | if (write) { | ||
542 | if (table->data == CAP_BSET) | ||
543 | usermodehelper_bset = cap_intersect(usermodehelper_bset, new_cap); | ||
544 | if (table->data == CAP_PI) | ||
545 | usermodehelper_inheritable = cap_intersect(usermodehelper_inheritable, new_cap); | ||
546 | } | ||
547 | spin_unlock(&umh_sysctl_lock); | ||
548 | |||
549 | return 0; | ||
550 | } | ||
551 | |||
552 | struct ctl_table usermodehelper_table[] = { | ||
553 | { | ||
554 | .procname = "bset", | ||
555 | .data = CAP_BSET, | ||
556 | .maxlen = _KERNEL_CAPABILITY_U32S * sizeof(unsigned long), | ||
557 | .mode = 0600, | ||
558 | .proc_handler = proc_cap_handler, | ||
559 | }, | ||
560 | { | ||
561 | .procname = "inheritable", | ||
562 | .data = CAP_PI, | ||
563 | .maxlen = _KERNEL_CAPABILITY_U32S * sizeof(unsigned long), | ||
564 | .mode = 0600, | ||
565 | .proc_handler = proc_cap_handler, | ||
566 | }, | ||
567 | { } | ||
568 | }; | ||
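usermodehelper_table only defines the "bset" and "inheritable" entries; hooking it into the sysctl tree happens outside this file, so the files end up under /proc/sys/kernel/usermodehelper/. Below is a hedged sketch of what such a parent entry can look like; example_usermodehelper_dir is hypothetical and the actual registration is not shown here.

/*
 * Hypothetical sketch (not part of this file): the kind of parent ctl_table
 * entry that exposes usermodehelper_table as
 * /proc/sys/kernel/usermodehelper/{bset,inheritable}.
 */
static struct ctl_table example_usermodehelper_dir[] = {
	{
		.procname	= "usermodehelper",
		.mode		= 0555,
		.child		= usermodehelper_table,
	},
	{ }
};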