author		Linus Torvalds <torvalds@linux-foundation.org>	2014-08-06 11:06:39 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-08-06 11:06:39 -0400
commit		bb2cbf5e9367d8598fecd0c48dead69560750223 (patch)
tree		fb2c620451b90f41a31726bdd82077813f941e39 /kernel
parent		e7fda6c4c3c1a7d6996dd75fd84670fa0b5d448f (diff)
parent		478d085524c57cf4283699f529d5a4c22188ea69 (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/jmorris/linux-security
Pull security subsystem updates from James Morris:
"In this release:
- PKCS#7 parser for the key management subsystem from David Howells
- appoint Kees Cook as seccomp maintainer
- bugfixes and general maintenance across the subsystem"
* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/jmorris/linux-security: (94 commits)
X.509: Need to export x509_request_asymmetric_key()
netlabel: shorter names for the NetLabel catmap funcs/structs
netlabel: fix the catmap walking functions
netlabel: fix the horribly broken catmap functions
netlabel: fix a problem when setting bits below the previously lowest bit
PKCS#7: X.509 certificate issuer and subject are mandatory fields in the ASN.1
tpm: simplify code by using %*phN specifier
tpm: Provide a generic means to override the chip returned timeouts
tpm: missing tpm_chip_put in tpm_get_random()
tpm: Properly clean sysfs entries in error path
tpm: Add missing tpm_do_selftest to ST33 I2C driver
PKCS#7: Use x509_request_asymmetric_key()
Revert "selinux: fix the default socket labeling in sock_graft()"
X.509: x509_request_asymmetric_keys() doesn't need string length arguments
PKCS#7: fix sparse non static symbol warning
KEYS: revert encrypted key change
ima: add support for measuring and appraising firmware
firmware_class: perform new LSM checks
security: introduce kernel_fw_from_file hook
PKCS#7: Missing inclusion of linux/err.h
...
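
For context on the seccomp portion of this pull: the series adds a dedicated seccomp(2) syscall and a SECCOMP_FILTER_FLAG_TSYNC flag that applies the caller's filter to every thread in its thread group (see the kernel/seccomp.c diff below). The following is an illustrative userspace sketch, not part of the merge itself; the __NR_seccomp and SECCOMP_* fallback values are assumptions for x86_64 and pre-3.17 headers.

/* Illustrative sketch, not part of this merge: install a trivial filter on
 * every thread of the calling process using the new seccomp(2) syscall with
 * SECCOMP_FILTER_FLAG_TSYNC. Fallback constants below are assumptions for
 * x86_64 and older userspace headers. */
#include <stddef.h>
#include <stdio.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/filter.h>
#include <linux/seccomp.h>

#ifndef __NR_seccomp
#define __NR_seccomp 317			/* x86_64 only (assumption) */
#endif
#ifndef SECCOMP_SET_MODE_FILTER
#define SECCOMP_SET_MODE_FILTER 1
#endif
#ifndef SECCOMP_FILTER_FLAG_TSYNC
#define SECCOMP_FILTER_FLAG_TSYNC 1
#endif

int main(void)
{
	struct sock_filter insns[] = {
		/* Load the syscall number from struct seccomp_data. */
		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
			 offsetof(struct seccomp_data, nr)),
		/* Kill on getpid(), allow everything else (a real filter
		 * would check the audit arch value first). */
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_getpid, 0, 1),
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_KILL),
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = sizeof(insns) / sizeof(insns[0]),
		.filter = insns,
	};
	long ret;

	/* Required unless the caller has CAP_SYS_ADMIN; see
	 * seccomp_prepare_filter() in the diff below. */
	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
		perror("prctl(PR_SET_NO_NEW_PRIVS)");

	ret = syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER,
		      SECCOMP_FILTER_FLAG_TSYNC, &prog);
	if (ret)
		fprintf(stderr, "seccomp: %ld\n", ret);
	return ret ? 1 : 0;
}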
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/audit.c	2
-rw-r--r--	kernel/capability.c	4
-rw-r--r--	kernel/fork.c	49
-rw-r--r--	kernel/seccomp.c	412
-rw-r--r--	kernel/sys.c	4
-rw-r--r--	kernel/sys_ni.c	3
-rw-r--r--	kernel/system_keyring.c	1
7 files changed, 412 insertions, 63 deletions
diff --git a/kernel/audit.c b/kernel/audit.c
index 3ef2e0e797e8..ba2ff5a5c600 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -1677,7 +1677,7 @@ void audit_log_cap(struct audit_buffer *ab, char *prefix, kernel_cap_t *cap)
 	audit_log_format(ab, " %s=", prefix);
 	CAP_FOR_EACH_U32(i) {
 		audit_log_format(ab, "%08x",
-				 cap->cap[(_KERNEL_CAPABILITY_U32S-1) - i]);
+				 cap->cap[CAP_LAST_U32 - i]);
 	}
 }

diff --git a/kernel/capability.c b/kernel/capability.c
index a5cf13c018ce..989f5bfc57dc 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -258,6 +258,10 @@ SYSCALL_DEFINE2(capset, cap_user_header_t, header, const cap_user_data_t, data)
 		i++;
 	}

+	effective.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK;
+	permitted.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK;
+	inheritable.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK;
+
 	new = prepare_creds();
 	if (!new)
 		return -ENOMEM;
diff --git a/kernel/fork.c b/kernel/fork.c
index 5f1bf3bebb4f..fbd3497b221f 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -315,6 +315,15 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
 		goto free_ti;

 	tsk->stack = ti;
+#ifdef CONFIG_SECCOMP
+	/*
+	 * We must handle setting up seccomp filters once we're under
+	 * the sighand lock in case orig has changed between now and
+	 * then. Until then, filter must be NULL to avoid messing up
+	 * the usage counts on the error path calling free_task.
+	 */
+	tsk->seccomp.filter = NULL;
+#endif

 	setup_thread_stack(tsk, orig);
 	clear_user_return_notifier(tsk);
@@ -1081,6 +1090,39 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 	return 0;
 }

+static void copy_seccomp(struct task_struct *p)
+{
+#ifdef CONFIG_SECCOMP
+	/*
+	 * Must be called with sighand->lock held, which is common to
+	 * all threads in the group. Holding cred_guard_mutex is not
+	 * needed because this new task is not yet running and cannot
+	 * be racing exec.
+	 */
+	BUG_ON(!spin_is_locked(&current->sighand->siglock));
+
+	/* Ref-count the new filter user, and assign it. */
+	get_seccomp_filter(current);
+	p->seccomp = current->seccomp;
+
+	/*
+	 * Explicitly enable no_new_privs here in case it got set
+	 * between the task_struct being duplicated and holding the
+	 * sighand lock. The seccomp state and nnp must be in sync.
+	 */
+	if (task_no_new_privs(current))
+		task_set_no_new_privs(p);
+
+	/*
+	 * If the parent gained a seccomp mode after copying thread
+	 * flags and between before we held the sighand lock, we have
+	 * to manually enable the seccomp thread flag here.
+	 */
+	if (p->seccomp.mode != SECCOMP_MODE_DISABLED)
+		set_tsk_thread_flag(p, TIF_SECCOMP);
+#endif
+}
+
 SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
 {
 	current->clear_child_tid = tidptr;
@@ -1195,7 +1237,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 		goto fork_out;

 	ftrace_graph_init_task(p);
-	get_seccomp_filter(p);

 	rt_mutex_init_task(p);

@@ -1435,6 +1476,12 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	spin_lock(&current->sighand->siglock);

 	/*
+	 * Copy seccomp details explicitly here, in case they were changed
+	 * before holding sighand lock.
+	 */
+	copy_seccomp(p);
+
+	/*
 	 * Process group and session signals need to be delivered to just the
 	 * parent before the fork or both the parent and the child after the
 	 * fork. Restart if a signal comes in before we add the new process to
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index 301bbc24739c..74f460179171 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -18,15 +18,17 @@
 #include <linux/compat.h>
 #include <linux/sched.h>
 #include <linux/seccomp.h>
+#include <linux/slab.h>
+#include <linux/syscalls.h>

 /* #define SECCOMP_DEBUG 1 */

 #ifdef CONFIG_SECCOMP_FILTER
 #include <asm/syscall.h>
 #include <linux/filter.h>
+#include <linux/pid.h>
 #include <linux/ptrace.h>
 #include <linux/security.h>
-#include <linux/slab.h>
 #include <linux/tracehook.h>
 #include <linux/uaccess.h>

@@ -172,21 +174,24 @@ static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen)
  */
 static u32 seccomp_run_filters(int syscall)
 {
-	struct seccomp_filter *f;
+	struct seccomp_filter *f = ACCESS_ONCE(current->seccomp.filter);
 	struct seccomp_data sd;
 	u32 ret = SECCOMP_RET_ALLOW;

 	/* Ensure unexpected behavior doesn't result in failing open. */
-	if (WARN_ON(current->seccomp.filter == NULL))
+	if (unlikely(WARN_ON(f == NULL)))
 		return SECCOMP_RET_KILL;

+	/* Make sure cross-thread synced filter points somewhere sane. */
+	smp_read_barrier_depends();
+
 	populate_seccomp_data(&sd);

 	/*
 	 * All filters in the list are evaluated and the lowest BPF return
 	 * value always takes priority (ignoring the DATA).
 	 */
-	for (f = current->seccomp.filter; f; f = f->prev) {
+	for (; f; f = f->prev) {
 		u32 cur_ret = SK_RUN_FILTER(f->prog, (void *)&sd);

 		if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION))
@@ -194,29 +199,159 @@ static u32 seccomp_run_filters(int syscall)
 	}
 	return ret;
 }
+#endif /* CONFIG_SECCOMP_FILTER */
+
+static inline bool seccomp_may_assign_mode(unsigned long seccomp_mode)
+{
+	BUG_ON(!spin_is_locked(&current->sighand->siglock));
+
+	if (current->seccomp.mode && current->seccomp.mode != seccomp_mode)
+		return false;
+
+	return true;
+}
+
+static inline void seccomp_assign_mode(struct task_struct *task,
+				       unsigned long seccomp_mode)
+{
+	BUG_ON(!spin_is_locked(&task->sighand->siglock));
+
+	task->seccomp.mode = seccomp_mode;
+	/*
+	 * Make sure TIF_SECCOMP cannot be set before the mode (and
+	 * filter) is set.
+	 */
+	smp_mb__before_atomic();
+	set_tsk_thread_flag(task, TIF_SECCOMP);
+}
+
+#ifdef CONFIG_SECCOMP_FILTER
+/* Returns 1 if the parent is an ancestor of the child. */
+static int is_ancestor(struct seccomp_filter *parent,
+		       struct seccomp_filter *child)
+{
+	/* NULL is the root ancestor. */
+	if (parent == NULL)
+		return 1;
+	for (; child; child = child->prev)
+		if (child == parent)
+			return 1;
+	return 0;
+}

 /**
- * seccomp_attach_filter: Attaches a seccomp filter to current.
+ * seccomp_can_sync_threads: checks if all threads can be synchronized
+ *
+ * Expects sighand and cred_guard_mutex locks to be held.
+ *
+ * Returns 0 on success, -ve on error, or the pid of a thread which was
+ * either not in the correct seccomp mode or it did not have an ancestral
+ * seccomp filter.
+ */
+static inline pid_t seccomp_can_sync_threads(void)
+{
+	struct task_struct *thread, *caller;
+
+	BUG_ON(!mutex_is_locked(&current->signal->cred_guard_mutex));
+	BUG_ON(!spin_is_locked(&current->sighand->siglock));
+
+	/* Validate all threads being eligible for synchronization. */
+	caller = current;
+	for_each_thread(caller, thread) {
+		pid_t failed;
+
+		/* Skip current, since it is initiating the sync. */
+		if (thread == caller)
+			continue;
+
+		if (thread->seccomp.mode == SECCOMP_MODE_DISABLED ||
+		    (thread->seccomp.mode == SECCOMP_MODE_FILTER &&
+		     is_ancestor(thread->seccomp.filter,
+				 caller->seccomp.filter)))
+			continue;
+
+		/* Return the first thread that cannot be synchronized. */
+		failed = task_pid_vnr(thread);
+		/* If the pid cannot be resolved, then return -ESRCH */
+		if (unlikely(WARN_ON(failed == 0)))
+			failed = -ESRCH;
+		return failed;
+	}
+
+	return 0;
+}
+
+/**
+ * seccomp_sync_threads: sets all threads to use current's filter
+ *
+ * Expects sighand and cred_guard_mutex locks to be held, and for
+ * seccomp_can_sync_threads() to have returned success already
+ * without dropping the locks.
+ *
+ */
+static inline void seccomp_sync_threads(void)
+{
+	struct task_struct *thread, *caller;
+
+	BUG_ON(!mutex_is_locked(&current->signal->cred_guard_mutex));
+	BUG_ON(!spin_is_locked(&current->sighand->siglock));
+
+	/* Synchronize all threads. */
+	caller = current;
+	for_each_thread(caller, thread) {
+		/* Skip current, since it needs no changes. */
+		if (thread == caller)
+			continue;
+
+		/* Get a task reference for the new leaf node. */
+		get_seccomp_filter(caller);
+		/*
+		 * Drop the task reference to the shared ancestor since
+		 * current's path will hold a reference. (This also
+		 * allows a put before the assignment.)
+		 */
+		put_seccomp_filter(thread);
+		smp_store_release(&thread->seccomp.filter,
+				  caller->seccomp.filter);
+		/*
+		 * Opt the other thread into seccomp if needed.
+		 * As threads are considered to be trust-realm
+		 * equivalent (see ptrace_may_access), it is safe to
+		 * allow one thread to transition the other.
+		 */
+		if (thread->seccomp.mode == SECCOMP_MODE_DISABLED) {
+			/*
+			 * Don't let an unprivileged task work around
+			 * the no_new_privs restriction by creating
+			 * a thread that sets it up, enters seccomp,
+			 * then dies.
+			 */
+			if (task_no_new_privs(caller))
+				task_set_no_new_privs(thread);
+
+			seccomp_assign_mode(thread, SECCOMP_MODE_FILTER);
+		}
+	}
+}
+
+/**
+ * seccomp_prepare_filter: Prepares a seccomp filter for use.
  * @fprog: BPF program to install
  *
- * Returns 0 on success or an errno on failure.
+ * Returns filter on success or an ERR_PTR on failure.
  */
-static long seccomp_attach_filter(struct sock_fprog *fprog)
+static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog)
 {
 	struct seccomp_filter *filter;
-	unsigned long fp_size = fprog->len * sizeof(struct sock_filter);
-	unsigned long total_insns = fprog->len;
+	unsigned long fp_size;
 	struct sock_filter *fp;
 	int new_len;
 	long ret;

 	if (fprog->len == 0 || fprog->len > BPF_MAXINSNS)
-		return -EINVAL;
-
-	for (filter = current->seccomp.filter; filter; filter = filter->prev)
-		total_insns += filter->prog->len + 4; /* include a 4 instr penalty */
-	if (total_insns > MAX_INSNS_PER_PATH)
-		return -ENOMEM;
+		return ERR_PTR(-EINVAL);
+	BUG_ON(INT_MAX / fprog->len < sizeof(struct sock_filter));
+	fp_size = fprog->len * sizeof(struct sock_filter);

 	/*
 	 * Installing a seccomp filter requires that the task has
@@ -224,14 +359,14 @@ static long seccomp_attach_filter(struct sock_fprog *fprog)
 	 * This avoids scenarios where unprivileged tasks can affect the
 	 * behavior of privileged children.
 	 */
-	if (!current->no_new_privs &&
+	if (!task_no_new_privs(current) &&
 	    security_capable_noaudit(current_cred(), current_user_ns(),
 				     CAP_SYS_ADMIN) != 0)
-		return -EACCES;
+		return ERR_PTR(-EACCES);

 	fp = kzalloc(fp_size, GFP_KERNEL|__GFP_NOWARN);
 	if (!fp)
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);

 	/* Copy the instructions from fprog. */
 	ret = -EFAULT;
@@ -275,13 +410,7 @@ static long seccomp_attach_filter(struct sock_fprog *fprog)

 	sk_filter_select_runtime(filter->prog);

-	/*
-	 * If there is an existing filter, make it the prev and don't drop its
-	 * task reference.
-	 */
-	filter->prev = current->seccomp.filter;
-	current->seccomp.filter = filter;
-	return 0;
+	return filter;

 free_filter_prog:
 	kfree(filter->prog);
@@ -289,19 +418,20 @@ free_filter:
 	kfree(filter);
 free_prog:
 	kfree(fp);
-	return ret;
+	return ERR_PTR(ret);
 }

 /**
- * seccomp_attach_user_filter - attaches a user-supplied sock_fprog
+ * seccomp_prepare_user_filter - prepares a user-supplied sock_fprog
  * @user_filter: pointer to the user data containing a sock_fprog.
  *
  * Returns 0 on success and non-zero otherwise.
  */
-static long seccomp_attach_user_filter(char __user *user_filter)
+static struct seccomp_filter *
+seccomp_prepare_user_filter(const char __user *user_filter)
 {
 	struct sock_fprog fprog;
-	long ret = -EFAULT;
+	struct seccomp_filter *filter = ERR_PTR(-EFAULT);

 #ifdef CONFIG_COMPAT
 	if (is_compat_task()) {
@@ -314,9 +444,56 @@ static long seccomp_attach_user_filter(char __user *user_filter)
 #endif
 	if (copy_from_user(&fprog, user_filter, sizeof(fprog)))
 		goto out;
-	ret = seccomp_attach_filter(&fprog);
+	filter = seccomp_prepare_filter(&fprog);
 out:
-	return ret;
+	return filter;
+}
+
+/**
+ * seccomp_attach_filter: validate and attach filter
+ * @flags: flags to change filter behavior
+ * @filter: seccomp filter to add to the current process
+ *
+ * Caller must be holding current->sighand->siglock lock.
+ *
+ * Returns 0 on success, -ve on error.
+ */
+static long seccomp_attach_filter(unsigned int flags,
+				  struct seccomp_filter *filter)
+{
+	unsigned long total_insns;
+	struct seccomp_filter *walker;
+
+	BUG_ON(!spin_is_locked(&current->sighand->siglock));
+
+	/* Validate resulting filter length. */
+	total_insns = filter->prog->len;
+	for (walker = current->seccomp.filter; walker; walker = walker->prev)
+		total_insns += walker->prog->len + 4; /* 4 instr penalty */
+	if (total_insns > MAX_INSNS_PER_PATH)
+		return -ENOMEM;
+
+	/* If thread sync has been requested, check that it is possible. */
+	if (flags & SECCOMP_FILTER_FLAG_TSYNC) {
+		int ret;
+
+		ret = seccomp_can_sync_threads();
+		if (ret)
+			return ret;
+	}
+
+	/*
+	 * If there is an existing filter, make it the prev and don't drop its
+	 * task reference.
+	 */
+	filter->prev = current->seccomp.filter;
+	current->seccomp.filter = filter;
+
+	/* Now that the new filter is in place, synchronize to all threads. */
+	if (flags & SECCOMP_FILTER_FLAG_TSYNC)
+		seccomp_sync_threads();
+
+	return 0;
 }

 /* get_seccomp_filter - increments the reference count of the filter on @tsk */
@@ -329,6 +506,14 @@ void get_seccomp_filter(struct task_struct *tsk)
 	atomic_inc(&orig->usage);
 }

+static inline void seccomp_filter_free(struct seccomp_filter *filter)
+{
+	if (filter) {
+		sk_filter_free(filter->prog);
+		kfree(filter);
+	}
+}
+
 /* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
 void put_seccomp_filter(struct task_struct *tsk)
 {
@@ -337,8 +522,7 @@ void put_seccomp_filter(struct task_struct *tsk)
 	while (orig && atomic_dec_and_test(&orig->usage)) {
 		struct seccomp_filter *freeme = orig;
 		orig = orig->prev;
-		sk_filter_free(freeme->prog);
-		kfree(freeme);
+		seccomp_filter_free(freeme);
 	}
 }

@@ -382,12 +566,17 @@ static int mode1_syscalls_32[] = {

 int __secure_computing(int this_syscall)
 {
-	int mode = current->seccomp.mode;
 	int exit_sig = 0;
 	int *syscall;
 	u32 ret;

-	switch (mode) {
+	/*
+	 * Make sure that any changes to mode from another thread have
+	 * been seen after TIF_SECCOMP was seen.
+	 */
+	rmb();
+
+	switch (current->seccomp.mode) {
 	case SECCOMP_MODE_STRICT:
 		syscall = mode1_syscalls;
 #ifdef CONFIG_COMPAT
@@ -473,47 +662,152 @@ long prctl_get_seccomp(void)
 }

 /**
- * prctl_set_seccomp: configures current->seccomp.mode
- * @seccomp_mode: requested mode to use
- * @filter: optional struct sock_fprog for use with SECCOMP_MODE_FILTER
+ * seccomp_set_mode_strict: internal function for setting strict seccomp
  *
- * This function may be called repeatedly with a @seccomp_mode of
- * SECCOMP_MODE_FILTER to install additional filters. Every filter
- * successfully installed will be evaluated (in reverse order) for each system
- * call the task makes.
+ * Once current->seccomp.mode is non-zero, it may not be changed.
+ *
+ * Returns 0 on success or -EINVAL on failure.
+ */
+static long seccomp_set_mode_strict(void)
+{
+	const unsigned long seccomp_mode = SECCOMP_MODE_STRICT;
+	long ret = -EINVAL;
+
+	spin_lock_irq(&current->sighand->siglock);
+
+	if (!seccomp_may_assign_mode(seccomp_mode))
+		goto out;
+
+#ifdef TIF_NOTSC
+	disable_TSC();
+#endif
+	seccomp_assign_mode(current, seccomp_mode);
+	ret = 0;
+
+out:
+	spin_unlock_irq(&current->sighand->siglock);
+
+	return ret;
+}
+
+#ifdef CONFIG_SECCOMP_FILTER
+/**
+ * seccomp_set_mode_filter: internal function for setting seccomp filter
+ * @flags: flags to change filter behavior
+ * @filter: struct sock_fprog containing filter
+ *
+ * This function may be called repeatedly to install additional filters.
+ * Every filter successfully installed will be evaluated (in reverse order)
+ * for each system call the task makes.
  *
  * Once current->seccomp.mode is non-zero, it may not be changed.
  *
  * Returns 0 on success or -EINVAL on failure.
  */
-long prctl_set_seccomp(unsigned long seccomp_mode, char __user *filter)
+static long seccomp_set_mode_filter(unsigned int flags,
+				    const char __user *filter)
 {
+	const unsigned long seccomp_mode = SECCOMP_MODE_FILTER;
+	struct seccomp_filter *prepared = NULL;
 	long ret = -EINVAL;

-	if (current->seccomp.mode &&
-	    current->seccomp.mode != seccomp_mode)
+	/* Validate flags. */
+	if (flags & ~SECCOMP_FILTER_FLAG_MASK)
+		return -EINVAL;
+
+	/* Prepare the new filter before holding any locks. */
+	prepared = seccomp_prepare_user_filter(filter);
+	if (IS_ERR(prepared))
+		return PTR_ERR(prepared);
+
+	/*
+	 * Make sure we cannot change seccomp or nnp state via TSYNC
+	 * while another thread is in the middle of calling exec.
+	 */
+	if (flags & SECCOMP_FILTER_FLAG_TSYNC &&
+	    mutex_lock_killable(&current->signal->cred_guard_mutex))
+		goto out_free;
+
+	spin_lock_irq(&current->sighand->siglock);
+
+	if (!seccomp_may_assign_mode(seccomp_mode))
+		goto out;
+
+	ret = seccomp_attach_filter(flags, prepared);
+	if (ret)
 		goto out;
+	/* Do not free the successfully attached filter. */
+	prepared = NULL;
+
+	seccomp_assign_mode(current, seccomp_mode);
+out:
+	spin_unlock_irq(&current->sighand->siglock);
+	if (flags & SECCOMP_FILTER_FLAG_TSYNC)
+		mutex_unlock(&current->signal->cred_guard_mutex);
+out_free:
+	seccomp_filter_free(prepared);
+	return ret;
+}
+#else
+static inline long seccomp_set_mode_filter(unsigned int flags,
+					   const char __user *filter)
+{
+	return -EINVAL;
+}
+#endif
+
+/* Common entry point for both prctl and syscall. */
+static long do_seccomp(unsigned int op, unsigned int flags,
+		       const char __user *uargs)
+{
+	switch (op) {
+	case SECCOMP_SET_MODE_STRICT:
+		if (flags != 0 || uargs != NULL)
+			return -EINVAL;
+		return seccomp_set_mode_strict();
+	case SECCOMP_SET_MODE_FILTER:
+		return seccomp_set_mode_filter(flags, uargs);
+	default:
+		return -EINVAL;
+	}
+}
+
+SYSCALL_DEFINE3(seccomp, unsigned int, op, unsigned int, flags,
+		const char __user *, uargs)
+{
+	return do_seccomp(op, flags, uargs);
+}
+
+/**
+ * prctl_set_seccomp: configures current->seccomp.mode
+ * @seccomp_mode: requested mode to use
+ * @filter: optional struct sock_fprog for use with SECCOMP_MODE_FILTER
+ *
+ * Returns 0 on success or -EINVAL on failure.
+ */
+long prctl_set_seccomp(unsigned long seccomp_mode, char __user *filter)
+{
+	unsigned int op;
+	char __user *uargs;

 	switch (seccomp_mode) {
 	case SECCOMP_MODE_STRICT:
-		ret = 0;
-#ifdef TIF_NOTSC
-		disable_TSC();
-#endif
+		op = SECCOMP_SET_MODE_STRICT;
+		/*
+		 * Setting strict mode through prctl always ignored filter,
+		 * so make sure it is always NULL here to pass the internal
+		 * check in do_seccomp().
+		 */
+		uargs = NULL;
 		break;
-#ifdef CONFIG_SECCOMP_FILTER
 	case SECCOMP_MODE_FILTER:
-		ret = seccomp_attach_user_filter(filter);
-		if (ret)
-			goto out;
+		op = SECCOMP_SET_MODE_FILTER;
+		uargs = filter;
 		break;
-#endif
 	default:
-		goto out;
+		return -EINVAL;
 	}

-	current->seccomp.mode = seccomp_mode;
-	set_thread_flag(TIF_SECCOMP);
-out:
-	return ret;
+	/* prctl interface doesn't have flags, so they are always zero. */
+	return do_seccomp(op, 0, uargs);
 }
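
A caller-facing note on the TSYNC path added above: when seccomp_can_sync_threads() finds a thread in an incompatible state, the syscall does not fail with -1/errno but returns that thread's TID as a positive value. The helper below is a hypothetical sketch of handling that convention, not code from this merge, and assumes the updated uapi constants from this series are available in the headers.

/* Hypothetical caller-side handling of the TSYNC return convention
 * (not part of this merge). */
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/filter.h>
#include <linux/seccomp.h>

static int filter_all_threads(const struct sock_fprog *prog)
{
	long ret = syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER,
			   SECCOMP_FILTER_FLAG_TSYNC, prog);

	if (ret == 0)
		return 0;	/* every thread now runs the filter */
	if (ret > 0) {
		/* TID of a thread that could not be synchronized
		 * (see seccomp_can_sync_threads() above). */
		fprintf(stderr, "thread %ld blocks TSYNC\n", ret);
		return -1;
	}
	perror("seccomp");	/* ordinary error: EINVAL, EFAULT, ... */
	return -1;
}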
diff --git a/kernel/sys.c b/kernel/sys.c
index 66a751ebf9d9..ce8129192a26 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1990,12 +1990,12 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
 		if (arg2 != 1 || arg3 || arg4 || arg5)
 			return -EINVAL;

-		current->no_new_privs = 1;
+		task_set_no_new_privs(current);
 		break;
 	case PR_GET_NO_NEW_PRIVS:
 		if (arg2 || arg3 || arg4 || arg5)
 			return -EINVAL;
-		return current->no_new_privs ? 1 : 0;
+		return task_no_new_privs(current) ? 1 : 0;
 	case PR_GET_THP_DISABLE:
 		if (arg2 || arg3 || arg4 || arg5)
 			return -EINVAL;
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index 36441b51b5df..2904a2105914 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -213,3 +213,6 @@ cond_syscall(compat_sys_open_by_handle_at);

 /* compare kernel pointers */
 cond_syscall(sys_kcmp);
+
+/* operate on Secure Computing state */
+cond_syscall(sys_seccomp);
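
Because sys_seccomp is declared through cond_syscall(), an architecture that has not wired up the new syscall returns ENOSYS at runtime rather than failing to build. A hypothetical userspace fallback, not part of this series, would probe for that and drop back to the legacy prctl() interface:

/* Hypothetical fallback path, not part of this series: use seccomp(2) when
 * the architecture provides it, otherwise the legacy prctl() interface. */
#include <errno.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/filter.h>
#include <linux/seccomp.h>

static long install_filter(struct sock_fprog *prog)
{
	long ret = syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER, 0, prog);

	if (ret == -1 && errno == ENOSYS)
		ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, prog);
	return ret;
}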
diff --git a/kernel/system_keyring.c b/kernel/system_keyring.c
index 52ebc70263f4..875f64e8935b 100644
--- a/kernel/system_keyring.c
+++ b/kernel/system_keyring.c
@@ -89,6 +89,7 @@ static __init int load_system_certificate_list(void)
 			pr_err("Problem loading in-kernel X.509 certificate (%ld)\n",
 			       PTR_ERR(key));
 		} else {
+			set_bit(KEY_FLAG_BUILTIN, &key_ref_to_ptr(key)->flags);
 			pr_notice("Loaded X.509 cert '%s'\n",
 				  key_ref_to_ptr(key)->description);
 			key_ref_put(key);