Diffstat (limited to 'kernel')
-rw-r--r--   kernel/Makefile                       |   5
-rw-r--r--   kernel/acct.c                         |   7
-rw-r--r--   kernel/auditsc.c                      | 255
-rw-r--r--   kernel/capability.c                   | 288
-rw-r--r--   kernel/cgroup.c                       |  17
-rw-r--r--   kernel/cred-internals.h               |  21
-rw-r--r--   kernel/cred.c                         | 588
-rw-r--r--   kernel/delayacct.c                    |   2
-rw-r--r--   kernel/exit.c                         |  23
-rw-r--r--   kernel/fork.c                         |  62
-rw-r--r--   kernel/futex.c                        |  20
-rw-r--r--   kernel/futex_compat.c                 |   7
-rw-r--r--   kernel/kallsyms.c                     |  16
-rw-r--r--   kernel/kmod.c                         |  30
-rw-r--r--   kernel/nsproxy.c                      |  15
-rw-r--r--   kernel/ptrace.c                       |  29
-rw-r--r--   kernel/sched.c                        | 393
-rw-r--r--   kernel/sched_debug.c                  |  57
-rw-r--r--   kernel/sched_fair.c                   |   9
-rw-r--r--   kernel/sched_rt.c                     |   9
-rw-r--r--   kernel/sched_stats.h                  |   5
-rw-r--r--   kernel/signal.c                       |  60
-rw-r--r--   kernel/sys.c                          | 586
-rw-r--r--   kernel/sysctl.c                       |   2
-rw-r--r--   kernel/timer.c                        |   8
-rw-r--r--   kernel/trace/trace.c                  |   2
-rw-r--r--   kernel/trace/trace_functions_graph.c  |   6
-rw-r--r--   kernel/tsacct.c                       |   6
-rw-r--r--   kernel/uid16.c                        |  31
-rw-r--r--   kernel/user.c                         |  98
-rw-r--r--   kernel/user_namespace.c               |  65
-rw-r--r--   kernel/workqueue.c                    |   8
32 files changed, 1747 insertions, 983 deletions
diff --git a/kernel/Makefile b/kernel/Makefile
index 19fad003b19d..027edda63511 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -9,7 +9,7 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o \ | |||
9 | rcupdate.o extable.o params.o posix-timers.o \ | 9 | rcupdate.o extable.o params.o posix-timers.o \ |
10 | kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \ | 10 | kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \ |
11 | hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \ | 11 | hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \ |
12 | notifier.o ksysfs.o pm_qos_params.o sched_clock.o | 12 | notifier.o ksysfs.o pm_qos_params.o sched_clock.o cred.o |
13 | 13 | ||
14 | ifdef CONFIG_FUNCTION_TRACER | 14 | ifdef CONFIG_FUNCTION_TRACER |
15 | # Do not trace debug files and internal ftrace files | 15 | # Do not trace debug files and internal ftrace files |
@@ -19,7 +19,6 @@ CFLAGS_REMOVE_mutex-debug.o = -pg | |||
19 | CFLAGS_REMOVE_rtmutex-debug.o = -pg | 19 | CFLAGS_REMOVE_rtmutex-debug.o = -pg |
20 | CFLAGS_REMOVE_cgroup-debug.o = -pg | 20 | CFLAGS_REMOVE_cgroup-debug.o = -pg |
21 | CFLAGS_REMOVE_sched_clock.o = -pg | 21 | CFLAGS_REMOVE_sched_clock.o = -pg |
22 | CFLAGS_REMOVE_sched.o = -pg | ||
23 | endif | 22 | endif |
24 | 23 | ||
25 | obj-$(CONFIG_FREEZER) += freezer.o | 24 | obj-$(CONFIG_FREEZER) += freezer.o |
@@ -90,7 +89,7 @@ obj-$(CONFIG_FUNCTION_TRACER) += trace/ | |||
90 | obj-$(CONFIG_TRACING) += trace/ | 89 | obj-$(CONFIG_TRACING) += trace/ |
91 | obj-$(CONFIG_SMP) += sched_cpupri.o | 90 | obj-$(CONFIG_SMP) += sched_cpupri.o |
92 | 91 | ||
93 | ifneq ($(CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER),y) | 92 | ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y) |
94 | # According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is | 93 | # According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is |
95 | # needed for x86 only. Why this used to be enabled for all architectures is beyond | 94 | # needed for x86 only. Why this used to be enabled for all architectures is beyond |
96 | # me. I suspect most platforms don't need this, but until we know that for sure | 95 | # me. I suspect most platforms don't need this, but until we know that for sure |
diff --git a/kernel/acct.c b/kernel/acct.c
index f6006a60df5d..d57b7cbb98b6 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -530,15 +530,14 @@ static void do_acct_process(struct bsd_acct_struct *acct, | |||
530 | do_div(elapsed, AHZ); | 530 | do_div(elapsed, AHZ); |
531 | ac.ac_btime = get_seconds() - elapsed; | 531 | ac.ac_btime = get_seconds() - elapsed; |
532 | /* we really need to bite the bullet and change layout */ | 532 | /* we really need to bite the bullet and change layout */ |
533 | ac.ac_uid = current->uid; | 533 | current_uid_gid(&ac.ac_uid, &ac.ac_gid); |
534 | ac.ac_gid = current->gid; | ||
535 | #if ACCT_VERSION==2 | 534 | #if ACCT_VERSION==2 |
536 | ac.ac_ahz = AHZ; | 535 | ac.ac_ahz = AHZ; |
537 | #endif | 536 | #endif |
538 | #if ACCT_VERSION==1 || ACCT_VERSION==2 | 537 | #if ACCT_VERSION==1 || ACCT_VERSION==2 |
539 | /* backward-compatible 16 bit fields */ | 538 | /* backward-compatible 16 bit fields */ |
540 | ac.ac_uid16 = current->uid; | 539 | ac.ac_uid16 = ac.ac_uid; |
541 | ac.ac_gid16 = current->gid; | 540 | ac.ac_gid16 = ac.ac_gid; |
542 | #endif | 541 | #endif |
543 | #if ACCT_VERSION==3 | 542 | #if ACCT_VERSION==3 |
544 | ac.ac_pid = task_tgid_nr_ns(current, ns); | 543 | ac.ac_pid = task_tgid_nr_ns(current, ns); |
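The acct.c hunk above is typical of the whole series: direct reads of current->uid and current->gid give way to credential accessors. As a hedged sketch (the real definition is a macro in include/linux/cred.h and may differ in detail), current_uid_gid() amounts to taking one snapshot of current's subjective credentials and reading both IDs from it; example_current_uid_gid below is an illustrative name, not kernel code.

static inline void example_current_uid_gid(uid_t *uid, gid_t *gid)
{
	/* one consistent snapshot of current's credentials */
	const struct cred *cred = current_cred();

	*uid = cred->uid;
	*gid = cred->gid;
}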
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 2a3f0afc4d2a..4819f3711973 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -65,6 +65,7 @@ | |||
65 | #include <linux/highmem.h> | 65 | #include <linux/highmem.h> |
66 | #include <linux/syscalls.h> | 66 | #include <linux/syscalls.h> |
67 | #include <linux/inotify.h> | 67 | #include <linux/inotify.h> |
68 | #include <linux/capability.h> | ||
68 | 69 | ||
69 | #include "audit.h" | 70 | #include "audit.h" |
70 | 71 | ||
@@ -84,6 +85,15 @@ int audit_n_rules; | |||
84 | /* determines whether we collect data for signals sent */ | 85 | /* determines whether we collect data for signals sent */ |
85 | int audit_signals; | 86 | int audit_signals; |
86 | 87 | ||
88 | struct audit_cap_data { | ||
89 | kernel_cap_t permitted; | ||
90 | kernel_cap_t inheritable; | ||
91 | union { | ||
92 | unsigned int fE; /* effective bit of a file capability */ | ||
93 | kernel_cap_t effective; /* effective set of a process */ | ||
94 | }; | ||
95 | }; | ||
96 | |||
87 | /* When fs/namei.c:getname() is called, we store the pointer in name and | 97 | /* When fs/namei.c:getname() is called, we store the pointer in name and |
88 | * we don't let putname() free it (instead we free all of the saved | 98 | * we don't let putname() free it (instead we free all of the saved |
89 | * pointers at syscall exit time). | 99 | * pointers at syscall exit time). |
@@ -100,6 +110,8 @@ struct audit_names { | |||
100 | gid_t gid; | 110 | gid_t gid; |
101 | dev_t rdev; | 111 | dev_t rdev; |
102 | u32 osid; | 112 | u32 osid; |
113 | struct audit_cap_data fcap; | ||
114 | unsigned int fcap_ver; | ||
103 | }; | 115 | }; |
104 | 116 | ||
105 | struct audit_aux_data { | 117 | struct audit_aux_data { |
@@ -184,6 +196,20 @@ struct audit_aux_data_pids { | |||
184 | int pid_count; | 196 | int pid_count; |
185 | }; | 197 | }; |
186 | 198 | ||
199 | struct audit_aux_data_bprm_fcaps { | ||
200 | struct audit_aux_data d; | ||
201 | struct audit_cap_data fcap; | ||
202 | unsigned int fcap_ver; | ||
203 | struct audit_cap_data old_pcap; | ||
204 | struct audit_cap_data new_pcap; | ||
205 | }; | ||
206 | |||
207 | struct audit_aux_data_capset { | ||
208 | struct audit_aux_data d; | ||
209 | pid_t pid; | ||
210 | struct audit_cap_data cap; | ||
211 | }; | ||
212 | |||
187 | struct audit_tree_refs { | 213 | struct audit_tree_refs { |
188 | struct audit_tree_refs *next; | 214 | struct audit_tree_refs *next; |
189 | struct audit_chunk *c[31]; | 215 | struct audit_chunk *c[31]; |
@@ -421,6 +447,7 @@ static int audit_filter_rules(struct task_struct *tsk, | |||
421 | struct audit_names *name, | 447 | struct audit_names *name, |
422 | enum audit_state *state) | 448 | enum audit_state *state) |
423 | { | 449 | { |
450 | const struct cred *cred = get_task_cred(tsk); | ||
424 | int i, j, need_sid = 1; | 451 | int i, j, need_sid = 1; |
425 | u32 sid; | 452 | u32 sid; |
426 | 453 | ||
@@ -440,28 +467,28 @@ static int audit_filter_rules(struct task_struct *tsk, | |||
440 | } | 467 | } |
441 | break; | 468 | break; |
442 | case AUDIT_UID: | 469 | case AUDIT_UID: |
443 | result = audit_comparator(tsk->uid, f->op, f->val); | 470 | result = audit_comparator(cred->uid, f->op, f->val); |
444 | break; | 471 | break; |
445 | case AUDIT_EUID: | 472 | case AUDIT_EUID: |
446 | result = audit_comparator(tsk->euid, f->op, f->val); | 473 | result = audit_comparator(cred->euid, f->op, f->val); |
447 | break; | 474 | break; |
448 | case AUDIT_SUID: | 475 | case AUDIT_SUID: |
449 | result = audit_comparator(tsk->suid, f->op, f->val); | 476 | result = audit_comparator(cred->suid, f->op, f->val); |
450 | break; | 477 | break; |
451 | case AUDIT_FSUID: | 478 | case AUDIT_FSUID: |
452 | result = audit_comparator(tsk->fsuid, f->op, f->val); | 479 | result = audit_comparator(cred->fsuid, f->op, f->val); |
453 | break; | 480 | break; |
454 | case AUDIT_GID: | 481 | case AUDIT_GID: |
455 | result = audit_comparator(tsk->gid, f->op, f->val); | 482 | result = audit_comparator(cred->gid, f->op, f->val); |
456 | break; | 483 | break; |
457 | case AUDIT_EGID: | 484 | case AUDIT_EGID: |
458 | result = audit_comparator(tsk->egid, f->op, f->val); | 485 | result = audit_comparator(cred->egid, f->op, f->val); |
459 | break; | 486 | break; |
460 | case AUDIT_SGID: | 487 | case AUDIT_SGID: |
461 | result = audit_comparator(tsk->sgid, f->op, f->val); | 488 | result = audit_comparator(cred->sgid, f->op, f->val); |
462 | break; | 489 | break; |
463 | case AUDIT_FSGID: | 490 | case AUDIT_FSGID: |
464 | result = audit_comparator(tsk->fsgid, f->op, f->val); | 491 | result = audit_comparator(cred->fsgid, f->op, f->val); |
465 | break; | 492 | break; |
466 | case AUDIT_PERS: | 493 | case AUDIT_PERS: |
467 | result = audit_comparator(tsk->personality, f->op, f->val); | 494 | result = audit_comparator(tsk->personality, f->op, f->val); |
@@ -615,8 +642,10 @@ static int audit_filter_rules(struct task_struct *tsk, | |||
615 | break; | 642 | break; |
616 | } | 643 | } |
617 | 644 | ||
618 | if (!result) | 645 | if (!result) { |
646 | put_cred(cred); | ||
619 | return 0; | 647 | return 0; |
648 | } | ||
620 | } | 649 | } |
621 | if (rule->filterkey && ctx) | 650 | if (rule->filterkey && ctx) |
622 | ctx->filterkey = kstrdup(rule->filterkey, GFP_ATOMIC); | 651 | ctx->filterkey = kstrdup(rule->filterkey, GFP_ATOMIC); |
@@ -624,6 +653,7 @@ static int audit_filter_rules(struct task_struct *tsk, | |||
624 | case AUDIT_NEVER: *state = AUDIT_DISABLED; break; | 653 | case AUDIT_NEVER: *state = AUDIT_DISABLED; break; |
625 | case AUDIT_ALWAYS: *state = AUDIT_RECORD_CONTEXT; break; | 654 | case AUDIT_ALWAYS: *state = AUDIT_RECORD_CONTEXT; break; |
626 | } | 655 | } |
656 | put_cred(cred); | ||
627 | return 1; | 657 | return 1; |
628 | } | 658 | } |
629 | 659 | ||
@@ -1171,8 +1201,38 @@ static void audit_log_execve_info(struct audit_context *context, | |||
1171 | kfree(buf); | 1201 | kfree(buf); |
1172 | } | 1202 | } |
1173 | 1203 | ||
1204 | static void audit_log_cap(struct audit_buffer *ab, char *prefix, kernel_cap_t *cap) | ||
1205 | { | ||
1206 | int i; | ||
1207 | |||
1208 | audit_log_format(ab, " %s=", prefix); | ||
1209 | CAP_FOR_EACH_U32(i) { | ||
1210 | audit_log_format(ab, "%08x", cap->cap[(_KERNEL_CAPABILITY_U32S-1) - i]); | ||
1211 | } | ||
1212 | } | ||
1213 | |||
1214 | static void audit_log_fcaps(struct audit_buffer *ab, struct audit_names *name) | ||
1215 | { | ||
1216 | kernel_cap_t *perm = &name->fcap.permitted; | ||
1217 | kernel_cap_t *inh = &name->fcap.inheritable; | ||
1218 | int log = 0; | ||
1219 | |||
1220 | if (!cap_isclear(*perm)) { | ||
1221 | audit_log_cap(ab, "cap_fp", perm); | ||
1222 | log = 1; | ||
1223 | } | ||
1224 | if (!cap_isclear(*inh)) { | ||
1225 | audit_log_cap(ab, "cap_fi", inh); | ||
1226 | log = 1; | ||
1227 | } | ||
1228 | |||
1229 | if (log) | ||
1230 | audit_log_format(ab, " cap_fe=%d cap_fver=%x", name->fcap.fE, name->fcap_ver); | ||
1231 | } | ||
1232 | |||
1174 | static void audit_log_exit(struct audit_context *context, struct task_struct *tsk) | 1233 | static void audit_log_exit(struct audit_context *context, struct task_struct *tsk) |
1175 | { | 1234 | { |
1235 | const struct cred *cred; | ||
1176 | int i, call_panic = 0; | 1236 | int i, call_panic = 0; |
1177 | struct audit_buffer *ab; | 1237 | struct audit_buffer *ab; |
1178 | struct audit_aux_data *aux; | 1238 | struct audit_aux_data *aux; |
@@ -1182,14 +1242,15 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts | |||
1182 | context->pid = tsk->pid; | 1242 | context->pid = tsk->pid; |
1183 | if (!context->ppid) | 1243 | if (!context->ppid) |
1184 | context->ppid = sys_getppid(); | 1244 | context->ppid = sys_getppid(); |
1185 | context->uid = tsk->uid; | 1245 | cred = current_cred(); |
1186 | context->gid = tsk->gid; | 1246 | context->uid = cred->uid; |
1187 | context->euid = tsk->euid; | 1247 | context->gid = cred->gid; |
1188 | context->suid = tsk->suid; | 1248 | context->euid = cred->euid; |
1189 | context->fsuid = tsk->fsuid; | 1249 | context->suid = cred->suid; |
1190 | context->egid = tsk->egid; | 1250 | context->fsuid = cred->fsuid; |
1191 | context->sgid = tsk->sgid; | 1251 | context->egid = cred->egid; |
1192 | context->fsgid = tsk->fsgid; | 1252 | context->sgid = cred->sgid; |
1253 | context->fsgid = cred->fsgid; | ||
1193 | context->personality = tsk->personality; | 1254 | context->personality = tsk->personality; |
1194 | 1255 | ||
1195 | ab = audit_log_start(context, GFP_KERNEL, AUDIT_SYSCALL); | 1256 | ab = audit_log_start(context, GFP_KERNEL, AUDIT_SYSCALL); |
@@ -1334,6 +1395,28 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts | |||
1334 | audit_log_format(ab, "fd0=%d fd1=%d", axs->fd[0], axs->fd[1]); | 1395 | audit_log_format(ab, "fd0=%d fd1=%d", axs->fd[0], axs->fd[1]); |
1335 | break; } | 1396 | break; } |
1336 | 1397 | ||
1398 | case AUDIT_BPRM_FCAPS: { | ||
1399 | struct audit_aux_data_bprm_fcaps *axs = (void *)aux; | ||
1400 | audit_log_format(ab, "fver=%x", axs->fcap_ver); | ||
1401 | audit_log_cap(ab, "fp", &axs->fcap.permitted); | ||
1402 | audit_log_cap(ab, "fi", &axs->fcap.inheritable); | ||
1403 | audit_log_format(ab, " fe=%d", axs->fcap.fE); | ||
1404 | audit_log_cap(ab, "old_pp", &axs->old_pcap.permitted); | ||
1405 | audit_log_cap(ab, "old_pi", &axs->old_pcap.inheritable); | ||
1406 | audit_log_cap(ab, "old_pe", &axs->old_pcap.effective); | ||
1407 | audit_log_cap(ab, "new_pp", &axs->new_pcap.permitted); | ||
1408 | audit_log_cap(ab, "new_pi", &axs->new_pcap.inheritable); | ||
1409 | audit_log_cap(ab, "new_pe", &axs->new_pcap.effective); | ||
1410 | break; } | ||
1411 | |||
1412 | case AUDIT_CAPSET: { | ||
1413 | struct audit_aux_data_capset *axs = (void *)aux; | ||
1414 | audit_log_format(ab, "pid=%d", axs->pid); | ||
1415 | audit_log_cap(ab, "cap_pi", &axs->cap.inheritable); | ||
1416 | audit_log_cap(ab, "cap_pp", &axs->cap.permitted); | ||
1417 | audit_log_cap(ab, "cap_pe", &axs->cap.effective); | ||
1418 | break; } | ||
1419 | |||
1337 | } | 1420 | } |
1338 | audit_log_end(ab); | 1421 | audit_log_end(ab); |
1339 | } | 1422 | } |
@@ -1421,6 +1504,8 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts | |||
1421 | } | 1504 | } |
1422 | } | 1505 | } |
1423 | 1506 | ||
1507 | audit_log_fcaps(ab, n); | ||
1508 | |||
1424 | audit_log_end(ab); | 1509 | audit_log_end(ab); |
1425 | } | 1510 | } |
1426 | 1511 | ||
@@ -1802,8 +1887,36 @@ static int audit_inc_name_count(struct audit_context *context, | |||
1802 | return 0; | 1887 | return 0; |
1803 | } | 1888 | } |
1804 | 1889 | ||
1890 | |||
1891 | static inline int audit_copy_fcaps(struct audit_names *name, const struct dentry *dentry) | ||
1892 | { | ||
1893 | struct cpu_vfs_cap_data caps; | ||
1894 | int rc; | ||
1895 | |||
1896 | memset(&name->fcap.permitted, 0, sizeof(kernel_cap_t)); | ||
1897 | memset(&name->fcap.inheritable, 0, sizeof(kernel_cap_t)); | ||
1898 | name->fcap.fE = 0; | ||
1899 | name->fcap_ver = 0; | ||
1900 | |||
1901 | if (!dentry) | ||
1902 | return 0; | ||
1903 | |||
1904 | rc = get_vfs_caps_from_disk(dentry, &caps); | ||
1905 | if (rc) | ||
1906 | return rc; | ||
1907 | |||
1908 | name->fcap.permitted = caps.permitted; | ||
1909 | name->fcap.inheritable = caps.inheritable; | ||
1910 | name->fcap.fE = !!(caps.magic_etc & VFS_CAP_FLAGS_EFFECTIVE); | ||
1911 | name->fcap_ver = (caps.magic_etc & VFS_CAP_REVISION_MASK) >> VFS_CAP_REVISION_SHIFT; | ||
1912 | |||
1913 | return 0; | ||
1914 | } | ||
1915 | |||
1916 | |||
1805 | /* Copy inode data into an audit_names. */ | 1917 | /* Copy inode data into an audit_names. */ |
1806 | static void audit_copy_inode(struct audit_names *name, const struct inode *inode) | 1918 | static void audit_copy_inode(struct audit_names *name, const struct dentry *dentry, |
1919 | const struct inode *inode) | ||
1807 | { | 1920 | { |
1808 | name->ino = inode->i_ino; | 1921 | name->ino = inode->i_ino; |
1809 | name->dev = inode->i_sb->s_dev; | 1922 | name->dev = inode->i_sb->s_dev; |
@@ -1812,6 +1925,7 @@ static void audit_copy_inode(struct audit_names *name, const struct inode *inode | |||
1812 | name->gid = inode->i_gid; | 1925 | name->gid = inode->i_gid; |
1813 | name->rdev = inode->i_rdev; | 1926 | name->rdev = inode->i_rdev; |
1814 | security_inode_getsecid(inode, &name->osid); | 1927 | security_inode_getsecid(inode, &name->osid); |
1928 | audit_copy_fcaps(name, dentry); | ||
1815 | } | 1929 | } |
1816 | 1930 | ||
1817 | /** | 1931 | /** |
@@ -1846,7 +1960,7 @@ void __audit_inode(const char *name, const struct dentry *dentry) | |||
1846 | context->names[idx].name = NULL; | 1960 | context->names[idx].name = NULL; |
1847 | } | 1961 | } |
1848 | handle_path(dentry); | 1962 | handle_path(dentry); |
1849 | audit_copy_inode(&context->names[idx], inode); | 1963 | audit_copy_inode(&context->names[idx], dentry, inode); |
1850 | } | 1964 | } |
1851 | 1965 | ||
1852 | /** | 1966 | /** |
@@ -1907,7 +2021,7 @@ void __audit_inode_child(const char *dname, const struct dentry *dentry, | |||
1907 | if (!strcmp(dname, n->name) || | 2021 | if (!strcmp(dname, n->name) || |
1908 | !audit_compare_dname_path(dname, n->name, &dirlen)) { | 2022 | !audit_compare_dname_path(dname, n->name, &dirlen)) { |
1909 | if (inode) | 2023 | if (inode) |
1910 | audit_copy_inode(n, inode); | 2024 | audit_copy_inode(n, NULL, inode); |
1911 | else | 2025 | else |
1912 | n->ino = (unsigned long)-1; | 2026 | n->ino = (unsigned long)-1; |
1913 | found_child = n->name; | 2027 | found_child = n->name; |
@@ -1921,7 +2035,7 @@ add_names: | |||
1921 | return; | 2035 | return; |
1922 | idx = context->name_count - 1; | 2036 | idx = context->name_count - 1; |
1923 | context->names[idx].name = NULL; | 2037 | context->names[idx].name = NULL; |
1924 | audit_copy_inode(&context->names[idx], parent); | 2038 | audit_copy_inode(&context->names[idx], NULL, parent); |
1925 | } | 2039 | } |
1926 | 2040 | ||
1927 | if (!found_child) { | 2041 | if (!found_child) { |
@@ -1942,7 +2056,7 @@ add_names: | |||
1942 | } | 2056 | } |
1943 | 2057 | ||
1944 | if (inode) | 2058 | if (inode) |
1945 | audit_copy_inode(&context->names[idx], inode); | 2059 | audit_copy_inode(&context->names[idx], NULL, inode); |
1946 | else | 2060 | else |
1947 | context->names[idx].ino = (unsigned long)-1; | 2061 | context->names[idx].ino = (unsigned long)-1; |
1948 | } | 2062 | } |
@@ -1996,7 +2110,7 @@ int audit_set_loginuid(struct task_struct *task, uid_t loginuid) | |||
1996 | audit_log_format(ab, "login pid=%d uid=%u " | 2110 | audit_log_format(ab, "login pid=%d uid=%u " |
1997 | "old auid=%u new auid=%u" | 2111 | "old auid=%u new auid=%u" |
1998 | " old ses=%u new ses=%u", | 2112 | " old ses=%u new ses=%u", |
1999 | task->pid, task->uid, | 2113 | task->pid, task_uid(task), |
2000 | task->loginuid, loginuid, | 2114 | task->loginuid, loginuid, |
2001 | task->sessionid, sessionid); | 2115 | task->sessionid, sessionid); |
2002 | audit_log_end(ab); | 2116 | audit_log_end(ab); |
@@ -2379,7 +2493,7 @@ void __audit_ptrace(struct task_struct *t) | |||
2379 | 2493 | ||
2380 | context->target_pid = t->pid; | 2494 | context->target_pid = t->pid; |
2381 | context->target_auid = audit_get_loginuid(t); | 2495 | context->target_auid = audit_get_loginuid(t); |
2382 | context->target_uid = t->uid; | 2496 | context->target_uid = task_uid(t); |
2383 | context->target_sessionid = audit_get_sessionid(t); | 2497 | context->target_sessionid = audit_get_sessionid(t); |
2384 | security_task_getsecid(t, &context->target_sid); | 2498 | security_task_getsecid(t, &context->target_sid); |
2385 | memcpy(context->target_comm, t->comm, TASK_COMM_LEN); | 2499 | memcpy(context->target_comm, t->comm, TASK_COMM_LEN); |
@@ -2398,6 +2512,7 @@ int __audit_signal_info(int sig, struct task_struct *t) | |||
2398 | struct audit_aux_data_pids *axp; | 2512 | struct audit_aux_data_pids *axp; |
2399 | struct task_struct *tsk = current; | 2513 | struct task_struct *tsk = current; |
2400 | struct audit_context *ctx = tsk->audit_context; | 2514 | struct audit_context *ctx = tsk->audit_context; |
2515 | uid_t uid = current_uid(), t_uid = task_uid(t); | ||
2401 | 2516 | ||
2402 | if (audit_pid && t->tgid == audit_pid) { | 2517 | if (audit_pid && t->tgid == audit_pid) { |
2403 | if (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1 || sig == SIGUSR2) { | 2518 | if (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1 || sig == SIGUSR2) { |
@@ -2405,7 +2520,7 @@ int __audit_signal_info(int sig, struct task_struct *t) | |||
2405 | if (tsk->loginuid != -1) | 2520 | if (tsk->loginuid != -1) |
2406 | audit_sig_uid = tsk->loginuid; | 2521 | audit_sig_uid = tsk->loginuid; |
2407 | else | 2522 | else |
2408 | audit_sig_uid = tsk->uid; | 2523 | audit_sig_uid = uid; |
2409 | security_task_getsecid(tsk, &audit_sig_sid); | 2524 | security_task_getsecid(tsk, &audit_sig_sid); |
2410 | } | 2525 | } |
2411 | if (!audit_signals || audit_dummy_context()) | 2526 | if (!audit_signals || audit_dummy_context()) |
@@ -2417,7 +2532,7 @@ int __audit_signal_info(int sig, struct task_struct *t) | |||
2417 | if (!ctx->target_pid) { | 2532 | if (!ctx->target_pid) { |
2418 | ctx->target_pid = t->tgid; | 2533 | ctx->target_pid = t->tgid; |
2419 | ctx->target_auid = audit_get_loginuid(t); | 2534 | ctx->target_auid = audit_get_loginuid(t); |
2420 | ctx->target_uid = t->uid; | 2535 | ctx->target_uid = t_uid; |
2421 | ctx->target_sessionid = audit_get_sessionid(t); | 2536 | ctx->target_sessionid = audit_get_sessionid(t); |
2422 | security_task_getsecid(t, &ctx->target_sid); | 2537 | security_task_getsecid(t, &ctx->target_sid); |
2423 | memcpy(ctx->target_comm, t->comm, TASK_COMM_LEN); | 2538 | memcpy(ctx->target_comm, t->comm, TASK_COMM_LEN); |
@@ -2438,7 +2553,7 @@ int __audit_signal_info(int sig, struct task_struct *t) | |||
2438 | 2553 | ||
2439 | axp->target_pid[axp->pid_count] = t->tgid; | 2554 | axp->target_pid[axp->pid_count] = t->tgid; |
2440 | axp->target_auid[axp->pid_count] = audit_get_loginuid(t); | 2555 | axp->target_auid[axp->pid_count] = audit_get_loginuid(t); |
2441 | axp->target_uid[axp->pid_count] = t->uid; | 2556 | axp->target_uid[axp->pid_count] = t_uid; |
2442 | axp->target_sessionid[axp->pid_count] = audit_get_sessionid(t); | 2557 | axp->target_sessionid[axp->pid_count] = audit_get_sessionid(t); |
2443 | security_task_getsecid(t, &axp->target_sid[axp->pid_count]); | 2558 | security_task_getsecid(t, &axp->target_sid[axp->pid_count]); |
2444 | memcpy(axp->target_comm[axp->pid_count], t->comm, TASK_COMM_LEN); | 2559 | memcpy(axp->target_comm[axp->pid_count], t->comm, TASK_COMM_LEN); |
@@ -2448,6 +2563,86 @@ int __audit_signal_info(int sig, struct task_struct *t) | |||
2448 | } | 2563 | } |
2449 | 2564 | ||
2450 | /** | 2565 | /** |
2566 | * __audit_log_bprm_fcaps - store information about a loading bprm and relevant fcaps | ||
2567 | * @bprm: pointer to the bprm being processed | ||
2568 | * @new: the proposed new credentials | ||
2569 | * @old: the old credentials | ||
2570 | * | ||
2571 | * Simply check if the proc already has the caps given by the file and if not | ||
2572 | * store the priv escalation info for later auditing at the end of the syscall | ||
2573 | * | ||
2574 | * -Eric | ||
2575 | */ | ||
2576 | int __audit_log_bprm_fcaps(struct linux_binprm *bprm, | ||
2577 | const struct cred *new, const struct cred *old) | ||
2578 | { | ||
2579 | struct audit_aux_data_bprm_fcaps *ax; | ||
2580 | struct audit_context *context = current->audit_context; | ||
2581 | struct cpu_vfs_cap_data vcaps; | ||
2582 | struct dentry *dentry; | ||
2583 | |||
2584 | ax = kmalloc(sizeof(*ax), GFP_KERNEL); | ||
2585 | if (!ax) | ||
2586 | return -ENOMEM; | ||
2587 | |||
2588 | ax->d.type = AUDIT_BPRM_FCAPS; | ||
2589 | ax->d.next = context->aux; | ||
2590 | context->aux = (void *)ax; | ||
2591 | |||
2592 | dentry = dget(bprm->file->f_dentry); | ||
2593 | get_vfs_caps_from_disk(dentry, &vcaps); | ||
2594 | dput(dentry); | ||
2595 | |||
2596 | ax->fcap.permitted = vcaps.permitted; | ||
2597 | ax->fcap.inheritable = vcaps.inheritable; | ||
2598 | ax->fcap.fE = !!(vcaps.magic_etc & VFS_CAP_FLAGS_EFFECTIVE); | ||
2599 | ax->fcap_ver = (vcaps.magic_etc & VFS_CAP_REVISION_MASK) >> VFS_CAP_REVISION_SHIFT; | ||
2600 | |||
2601 | ax->old_pcap.permitted = old->cap_permitted; | ||
2602 | ax->old_pcap.inheritable = old->cap_inheritable; | ||
2603 | ax->old_pcap.effective = old->cap_effective; | ||
2604 | |||
2605 | ax->new_pcap.permitted = new->cap_permitted; | ||
2606 | ax->new_pcap.inheritable = new->cap_inheritable; | ||
2607 | ax->new_pcap.effective = new->cap_effective; | ||
2608 | return 0; | ||
2609 | } | ||
2610 | |||
2611 | /** | ||
2612 | * __audit_log_capset - store information about the arguments to the capset syscall | ||
2613 | * @pid: target pid of the capset call | ||
2614 | * @new: the new credentials | ||
2615 | * @old: the old (current) credentials | ||
2616 | * | ||
2617 | * Record the arguments userspace sent to sys_capset for later printing by the | ||
2618 | * audit system if applicable | ||
2619 | */ | ||
2620 | int __audit_log_capset(pid_t pid, | ||
2621 | const struct cred *new, const struct cred *old) | ||
2622 | { | ||
2623 | struct audit_aux_data_capset *ax; | ||
2624 | struct audit_context *context = current->audit_context; | ||
2625 | |||
2626 | if (likely(!audit_enabled || !context || context->dummy)) | ||
2627 | return 0; | ||
2628 | |||
2629 | ax = kmalloc(sizeof(*ax), GFP_KERNEL); | ||
2630 | if (!ax) | ||
2631 | return -ENOMEM; | ||
2632 | |||
2633 | ax->d.type = AUDIT_CAPSET; | ||
2634 | ax->d.next = context->aux; | ||
2635 | context->aux = (void *)ax; | ||
2636 | |||
2637 | ax->pid = pid; | ||
2638 | ax->cap.effective = new->cap_effective; | ||
2639 | ax->cap.inheritable = new->cap_effective; | ||
2640 | ax->cap.permitted = new->cap_permitted; | ||
2641 | |||
2642 | return 0; | ||
2643 | } | ||
2644 | |||
2645 | /** | ||
2451 | * audit_core_dumps - record information about processes that end abnormally | 2646 | * audit_core_dumps - record information about processes that end abnormally |
2452 | * @signr: signal value | 2647 | * @signr: signal value |
2453 | * | 2648 | * |
@@ -2458,7 +2653,8 @@ void audit_core_dumps(long signr) | |||
2458 | { | 2653 | { |
2459 | struct audit_buffer *ab; | 2654 | struct audit_buffer *ab; |
2460 | u32 sid; | 2655 | u32 sid; |
2461 | uid_t auid = audit_get_loginuid(current); | 2656 | uid_t auid = audit_get_loginuid(current), uid; |
2657 | gid_t gid; | ||
2462 | unsigned int sessionid = audit_get_sessionid(current); | 2658 | unsigned int sessionid = audit_get_sessionid(current); |
2463 | 2659 | ||
2464 | if (!audit_enabled) | 2660 | if (!audit_enabled) |
@@ -2468,8 +2664,9 @@ void audit_core_dumps(long signr) | |||
2468 | return; | 2664 | return; |
2469 | 2665 | ||
2470 | ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_ANOM_ABEND); | 2666 | ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_ANOM_ABEND); |
2667 | current_uid_gid(&uid, &gid); | ||
2471 | audit_log_format(ab, "auid=%u uid=%u gid=%u ses=%u", | 2668 | audit_log_format(ab, "auid=%u uid=%u gid=%u ses=%u", |
2472 | auid, current->uid, current->gid, sessionid); | 2669 | auid, uid, gid, sessionid); |
2473 | security_task_getsecid(current, &sid); | 2670 | security_task_getsecid(current, &sid); |
2474 | if (sid) { | 2671 | if (sid) { |
2475 | char *ctx = NULL; | 2672 | char *ctx = NULL; |
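A recurring pattern in the auditsc.c changes is visible in audit_filter_rules(): get_task_cred() takes a counted reference on another task's objective credentials, and every exit path must drop it with put_cred() (hence the new put_cred() calls before both returns). A hedged sketch of that pairing, with example_uid_matches as an illustrative helper rather than anything in the patch:

static int example_uid_matches(struct task_struct *tsk, uid_t wanted)
{
	const struct cred *cred = get_task_cred(tsk);	/* takes a reference */
	int result = (cred->uid == wanted);

	put_cred(cred);					/* must be dropped on every path */
	return result;
}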
diff --git a/kernel/capability.c b/kernel/capability.c
index 33e51e78c2d8..36b4b4daebec 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -7,6 +7,7 @@ | |||
7 | * 30 May 2002: Cleanup, Robert M. Love <rml@tech9.net> | 7 | * 30 May 2002: Cleanup, Robert M. Love <rml@tech9.net> |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/audit.h> | ||
10 | #include <linux/capability.h> | 11 | #include <linux/capability.h> |
11 | #include <linux/mm.h> | 12 | #include <linux/mm.h> |
12 | #include <linux/module.h> | 13 | #include <linux/module.h> |
@@ -14,12 +15,7 @@ | |||
14 | #include <linux/syscalls.h> | 15 | #include <linux/syscalls.h> |
15 | #include <linux/pid_namespace.h> | 16 | #include <linux/pid_namespace.h> |
16 | #include <asm/uaccess.h> | 17 | #include <asm/uaccess.h> |
17 | 18 | #include "cred-internals.h" | |
18 | /* | ||
19 | * This lock protects task->cap_* for all tasks including current. | ||
20 | * Locking rule: acquire this prior to tasklist_lock. | ||
21 | */ | ||
22 | static DEFINE_SPINLOCK(task_capability_lock); | ||
23 | 19 | ||
24 | /* | 20 | /* |
25 | * Leveraged for setting/resetting capabilities | 21 | * Leveraged for setting/resetting capabilities |
@@ -33,6 +29,17 @@ EXPORT_SYMBOL(__cap_empty_set); | |||
33 | EXPORT_SYMBOL(__cap_full_set); | 29 | EXPORT_SYMBOL(__cap_full_set); |
34 | EXPORT_SYMBOL(__cap_init_eff_set); | 30 | EXPORT_SYMBOL(__cap_init_eff_set); |
35 | 31 | ||
32 | #ifdef CONFIG_SECURITY_FILE_CAPABILITIES | ||
33 | int file_caps_enabled = 1; | ||
34 | |||
35 | static int __init file_caps_disable(char *str) | ||
36 | { | ||
37 | file_caps_enabled = 0; | ||
38 | return 1; | ||
39 | } | ||
40 | __setup("no_file_caps", file_caps_disable); | ||
41 | #endif | ||
42 | |||
36 | /* | 43 | /* |
37 | * More recent versions of libcap are available from: | 44 | * More recent versions of libcap are available from: |
38 | * | 45 | * |
@@ -115,167 +122,12 @@ static int cap_validate_magic(cap_user_header_t header, unsigned *tocopy) | |||
115 | return 0; | 122 | return 0; |
116 | } | 123 | } |
117 | 124 | ||
118 | #ifndef CONFIG_SECURITY_FILE_CAPABILITIES | ||
119 | |||
120 | /* | ||
121 | * Without filesystem capability support, we nominally support one process | ||
122 | * setting the capabilities of another | ||
123 | */ | ||
124 | static inline int cap_get_target_pid(pid_t pid, kernel_cap_t *pEp, | ||
125 | kernel_cap_t *pIp, kernel_cap_t *pPp) | ||
126 | { | ||
127 | struct task_struct *target; | ||
128 | int ret; | ||
129 | |||
130 | spin_lock(&task_capability_lock); | ||
131 | read_lock(&tasklist_lock); | ||
132 | |||
133 | if (pid && pid != task_pid_vnr(current)) { | ||
134 | target = find_task_by_vpid(pid); | ||
135 | if (!target) { | ||
136 | ret = -ESRCH; | ||
137 | goto out; | ||
138 | } | ||
139 | } else | ||
140 | target = current; | ||
141 | |||
142 | ret = security_capget(target, pEp, pIp, pPp); | ||
143 | |||
144 | out: | ||
145 | read_unlock(&tasklist_lock); | ||
146 | spin_unlock(&task_capability_lock); | ||
147 | |||
148 | return ret; | ||
149 | } | ||
150 | |||
151 | /* | ||
152 | * cap_set_pg - set capabilities for all processes in a given process | ||
153 | * group. We call this holding task_capability_lock and tasklist_lock. | ||
154 | */ | ||
155 | static inline int cap_set_pg(int pgrp_nr, kernel_cap_t *effective, | ||
156 | kernel_cap_t *inheritable, | ||
157 | kernel_cap_t *permitted) | ||
158 | { | ||
159 | struct task_struct *g, *target; | ||
160 | int ret = -EPERM; | ||
161 | int found = 0; | ||
162 | struct pid *pgrp; | ||
163 | |||
164 | spin_lock(&task_capability_lock); | ||
165 | read_lock(&tasklist_lock); | ||
166 | |||
167 | pgrp = find_vpid(pgrp_nr); | ||
168 | do_each_pid_task(pgrp, PIDTYPE_PGID, g) { | ||
169 | target = g; | ||
170 | while_each_thread(g, target) { | ||
171 | if (!security_capset_check(target, effective, | ||
172 | inheritable, permitted)) { | ||
173 | security_capset_set(target, effective, | ||
174 | inheritable, permitted); | ||
175 | ret = 0; | ||
176 | } | ||
177 | found = 1; | ||
178 | } | ||
179 | } while_each_pid_task(pgrp, PIDTYPE_PGID, g); | ||
180 | |||
181 | read_unlock(&tasklist_lock); | ||
182 | spin_unlock(&task_capability_lock); | ||
183 | |||
184 | if (!found) | ||
185 | ret = 0; | ||
186 | return ret; | ||
187 | } | ||
188 | |||
189 | /* | ||
190 | * cap_set_all - set capabilities for all processes other than init | ||
191 | * and self. We call this holding task_capability_lock and tasklist_lock. | ||
192 | */ | ||
193 | static inline int cap_set_all(kernel_cap_t *effective, | ||
194 | kernel_cap_t *inheritable, | ||
195 | kernel_cap_t *permitted) | ||
196 | { | ||
197 | struct task_struct *g, *target; | ||
198 | int ret = -EPERM; | ||
199 | int found = 0; | ||
200 | |||
201 | spin_lock(&task_capability_lock); | ||
202 | read_lock(&tasklist_lock); | ||
203 | |||
204 | do_each_thread(g, target) { | ||
205 | if (target == current | ||
206 | || is_container_init(target->group_leader)) | ||
207 | continue; | ||
208 | found = 1; | ||
209 | if (security_capset_check(target, effective, inheritable, | ||
210 | permitted)) | ||
211 | continue; | ||
212 | ret = 0; | ||
213 | security_capset_set(target, effective, inheritable, permitted); | ||
214 | } while_each_thread(g, target); | ||
215 | |||
216 | read_unlock(&tasklist_lock); | ||
217 | spin_unlock(&task_capability_lock); | ||
218 | |||
219 | if (!found) | ||
220 | ret = 0; | ||
221 | |||
222 | return ret; | ||
223 | } | ||
224 | |||
225 | /* | ||
226 | * Given the target pid does not refer to the current process we | ||
227 | * need more elaborate support... (This support is not present when | ||
228 | * filesystem capabilities are configured.) | ||
229 | */ | ||
230 | static inline int do_sys_capset_other_tasks(pid_t pid, kernel_cap_t *effective, | ||
231 | kernel_cap_t *inheritable, | ||
232 | kernel_cap_t *permitted) | ||
233 | { | ||
234 | struct task_struct *target; | ||
235 | int ret; | ||
236 | |||
237 | if (!capable(CAP_SETPCAP)) | ||
238 | return -EPERM; | ||
239 | |||
240 | if (pid == -1) /* all procs other than current and init */ | ||
241 | return cap_set_all(effective, inheritable, permitted); | ||
242 | |||
243 | else if (pid < 0) /* all procs in process group */ | ||
244 | return cap_set_pg(-pid, effective, inheritable, permitted); | ||
245 | |||
246 | /* target != current */ | ||
247 | spin_lock(&task_capability_lock); | ||
248 | read_lock(&tasklist_lock); | ||
249 | |||
250 | target = find_task_by_vpid(pid); | ||
251 | if (!target) | ||
252 | ret = -ESRCH; | ||
253 | else { | ||
254 | ret = security_capset_check(target, effective, inheritable, | ||
255 | permitted); | ||
256 | |||
257 | /* having verified that the proposed changes are legal, | ||
258 | we now put them into effect. */ | ||
259 | if (!ret) | ||
260 | security_capset_set(target, effective, inheritable, | ||
261 | permitted); | ||
262 | } | ||
263 | |||
264 | read_unlock(&tasklist_lock); | ||
265 | spin_unlock(&task_capability_lock); | ||
266 | |||
267 | return ret; | ||
268 | } | ||
269 | |||
270 | #else /* ie., def CONFIG_SECURITY_FILE_CAPABILITIES */ | ||
271 | |||
272 | /* | 125 | /* |
273 | * If we have configured with filesystem capability support, then the | 126 | * The only thing that can change the capabilities of the current |
274 | * only thing that can change the capabilities of the current process | 127 | * process is the current process. As such, we can't be in this code |
275 | * is the current process. As such, we can't be in this code at the | 128 | * at the same time as we are in the process of setting capabilities |
276 | * same time as we are in the process of setting capabilities in this | 129 | * in this process. The net result is that we can limit our use of |
277 | * process. The net result is that we can limit our use of locks to | 130 | * locks to when we are reading the caps of another process. |
278 | * when we are reading the caps of another process. | ||
279 | */ | 131 | */ |
280 | static inline int cap_get_target_pid(pid_t pid, kernel_cap_t *pEp, | 132 | static inline int cap_get_target_pid(pid_t pid, kernel_cap_t *pEp, |
281 | kernel_cap_t *pIp, kernel_cap_t *pPp) | 133 | kernel_cap_t *pIp, kernel_cap_t *pPp) |
@@ -285,7 +137,6 @@ static inline int cap_get_target_pid(pid_t pid, kernel_cap_t *pEp, | |||
285 | if (pid && (pid != task_pid_vnr(current))) { | 137 | if (pid && (pid != task_pid_vnr(current))) { |
286 | struct task_struct *target; | 138 | struct task_struct *target; |
287 | 139 | ||
288 | spin_lock(&task_capability_lock); | ||
289 | read_lock(&tasklist_lock); | 140 | read_lock(&tasklist_lock); |
290 | 141 | ||
291 | target = find_task_by_vpid(pid); | 142 | target = find_task_by_vpid(pid); |
@@ -295,50 +146,12 @@ static inline int cap_get_target_pid(pid_t pid, kernel_cap_t *pEp, | |||
295 | ret = security_capget(target, pEp, pIp, pPp); | 146 | ret = security_capget(target, pEp, pIp, pPp); |
296 | 147 | ||
297 | read_unlock(&tasklist_lock); | 148 | read_unlock(&tasklist_lock); |
298 | spin_unlock(&task_capability_lock); | ||
299 | } else | 149 | } else |
300 | ret = security_capget(current, pEp, pIp, pPp); | 150 | ret = security_capget(current, pEp, pIp, pPp); |
301 | 151 | ||
302 | return ret; | 152 | return ret; |
303 | } | 153 | } |
304 | 154 | ||
305 | /* | ||
306 | * With filesystem capability support configured, the kernel does not | ||
307 | * permit the changing of capabilities in one process by another | ||
308 | * process. (CAP_SETPCAP has much less broad semantics when configured | ||
309 | * this way.) | ||
310 | */ | ||
311 | static inline int do_sys_capset_other_tasks(pid_t pid, | ||
312 | kernel_cap_t *effective, | ||
313 | kernel_cap_t *inheritable, | ||
314 | kernel_cap_t *permitted) | ||
315 | { | ||
316 | return -EPERM; | ||
317 | } | ||
318 | |||
319 | #endif /* ie., ndef CONFIG_SECURITY_FILE_CAPABILITIES */ | ||
320 | |||
321 | /* | ||
322 | * Atomically modify the effective capabilities returning the original | ||
323 | * value. No permission check is performed here - it is assumed that the | ||
324 | * caller is permitted to set the desired effective capabilities. | ||
325 | */ | ||
326 | kernel_cap_t cap_set_effective(const kernel_cap_t pE_new) | ||
327 | { | ||
328 | kernel_cap_t pE_old; | ||
329 | |||
330 | spin_lock(&task_capability_lock); | ||
331 | |||
332 | pE_old = current->cap_effective; | ||
333 | current->cap_effective = pE_new; | ||
334 | |||
335 | spin_unlock(&task_capability_lock); | ||
336 | |||
337 | return pE_old; | ||
338 | } | ||
339 | |||
340 | EXPORT_SYMBOL(cap_set_effective); | ||
341 | |||
342 | /** | 155 | /** |
343 | * sys_capget - get the capabilities of a given process. | 156 | * sys_capget - get the capabilities of a given process. |
344 | * @header: pointer to struct that contains capability version and | 157 | * @header: pointer to struct that contains capability version and |
@@ -366,7 +179,6 @@ asmlinkage long sys_capget(cap_user_header_t header, cap_user_data_t dataptr) | |||
366 | return -EINVAL; | 179 | return -EINVAL; |
367 | 180 | ||
368 | ret = cap_get_target_pid(pid, &pE, &pI, &pP); | 181 | ret = cap_get_target_pid(pid, &pE, &pI, &pP); |
369 | |||
370 | if (!ret) { | 182 | if (!ret) { |
371 | struct __user_cap_data_struct kdata[_KERNEL_CAPABILITY_U32S]; | 183 | struct __user_cap_data_struct kdata[_KERNEL_CAPABILITY_U32S]; |
372 | unsigned i; | 184 | unsigned i; |
@@ -412,16 +224,14 @@ asmlinkage long sys_capget(cap_user_header_t header, cap_user_data_t dataptr) | |||
412 | * @data: pointer to struct that contains the effective, permitted, | 224 | * @data: pointer to struct that contains the effective, permitted, |
413 | * and inheritable capabilities | 225 | * and inheritable capabilities |
414 | * | 226 | * |
415 | * Set capabilities for a given process, all processes, or all | 227 | * Set capabilities for the current process only. The ability to any other |
416 | * processes in a given process group. | 228 | * process(es) has been deprecated and removed. |
417 | * | 229 | * |
418 | * The restrictions on setting capabilities are specified as: | 230 | * The restrictions on setting capabilities are specified as: |
419 | * | 231 | * |
420 | * [pid is for the 'target' task. 'current' is the calling task.] | 232 | * I: any raised capabilities must be a subset of the old permitted |
421 | * | 233 | * P: any raised capabilities must be a subset of the old permitted |
422 | * I: any raised capabilities must be a subset of the (old current) permitted | 234 | * E: must be set to a subset of new permitted |
423 | * P: any raised capabilities must be a subset of the (old current) permitted | ||
424 | * E: must be set to a subset of (new target) permitted | ||
425 | * | 235 | * |
426 | * Returns 0 on success and < 0 on error. | 236 | * Returns 0 on success and < 0 on error. |
427 | */ | 237 | */ |
@@ -430,6 +240,7 @@ asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data) | |||
430 | struct __user_cap_data_struct kdata[_KERNEL_CAPABILITY_U32S]; | 240 | struct __user_cap_data_struct kdata[_KERNEL_CAPABILITY_U32S]; |
431 | unsigned i, tocopy; | 241 | unsigned i, tocopy; |
432 | kernel_cap_t inheritable, permitted, effective; | 242 | kernel_cap_t inheritable, permitted, effective; |
243 | struct cred *new; | ||
433 | int ret; | 244 | int ret; |
434 | pid_t pid; | 245 | pid_t pid; |
435 | 246 | ||
@@ -440,10 +251,13 @@ asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data) | |||
440 | if (get_user(pid, &header->pid)) | 251 | if (get_user(pid, &header->pid)) |
441 | return -EFAULT; | 252 | return -EFAULT; |
442 | 253 | ||
443 | if (copy_from_user(&kdata, data, tocopy | 254 | /* may only affect current now */ |
444 | * sizeof(struct __user_cap_data_struct))) { | 255 | if (pid != 0 && pid != task_pid_vnr(current)) |
256 | return -EPERM; | ||
257 | |||
258 | if (copy_from_user(&kdata, data, | ||
259 | tocopy * sizeof(struct __user_cap_data_struct))) | ||
445 | return -EFAULT; | 260 | return -EFAULT; |
446 | } | ||
447 | 261 | ||
448 | for (i = 0; i < tocopy; i++) { | 262 | for (i = 0; i < tocopy; i++) { |
449 | effective.cap[i] = kdata[i].effective; | 263 | effective.cap[i] = kdata[i].effective; |
@@ -457,32 +271,23 @@ asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data) | |||
457 | i++; | 271 | i++; |
458 | } | 272 | } |
459 | 273 | ||
460 | if (pid && (pid != task_pid_vnr(current))) | 274 | new = prepare_creds(); |
461 | ret = do_sys_capset_other_tasks(pid, &effective, &inheritable, | 275 | if (!new) |
462 | &permitted); | 276 | return -ENOMEM; |
463 | else { | ||
464 | /* | ||
465 | * This lock is required even when filesystem | ||
466 | * capability support is configured - it protects the | ||
467 | * sys_capget() call from returning incorrect data in | ||
468 | * the case that the targeted process is not the | ||
469 | * current one. | ||
470 | */ | ||
471 | spin_lock(&task_capability_lock); | ||
472 | 277 | ||
473 | ret = security_capset_check(current, &effective, &inheritable, | 278 | ret = security_capset(new, current_cred(), |
474 | &permitted); | 279 | &effective, &inheritable, &permitted); |
475 | /* | 280 | if (ret < 0) |
476 | * Having verified that the proposed changes are | 281 | goto error; |
477 | * legal, we now put them into effect. | 282 | |
478 | */ | 283 | ret = audit_log_capset(pid, new, current_cred()); |
479 | if (!ret) | 284 | if (ret < 0) |
480 | security_capset_set(current, &effective, &inheritable, | 285 | return ret; |
481 | &permitted); | ||
482 | spin_unlock(&task_capability_lock); | ||
483 | } | ||
484 | 286 | ||
287 | return commit_creds(new); | ||
485 | 288 | ||
289 | error: | ||
290 | abort_creds(new); | ||
486 | return ret; | 291 | return ret; |
487 | } | 292 | } |
488 | 293 | ||
@@ -498,6 +303,11 @@ asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data) | |||
498 | */ | 303 | */ |
499 | int capable(int cap) | 304 | int capable(int cap) |
500 | { | 305 | { |
306 | if (unlikely(!cap_valid(cap))) { | ||
307 | printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap); | ||
308 | BUG(); | ||
309 | } | ||
310 | |||
501 | if (has_capability(current, cap)) { | 311 | if (has_capability(current, cap)) { |
502 | current->flags |= PF_SUPERPRIV; | 312 | current->flags |= PF_SUPERPRIV; |
503 | return 1; | 313 | return 1; |
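The rewritten sys_capset() above illustrates the copy-on-write model the rest of this patch builds on: allocate a private copy with prepare_creds(), let the security hook vet the proposed values, then either commit_creds() (which installs the copy and consumes the reference) or abort_creds() on error. A minimal sketch of that shape, assuming a caller that only wants to drop one capability from its bounding set; example_drop_sys_boot is illustrative, not part of the patch:

static int example_drop_sys_boot(void)
{
	struct cred *new = prepare_creds();	/* private, modifiable copy */

	if (!new)
		return -ENOMEM;

	cap_lower(new->cap_bset, CAP_SYS_BOOT);	/* the actual modification */

	return commit_creds(new);		/* installs 'new' and eats the reference */
}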
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 2606d0fb4e54..48348dde6d81 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -571,8 +571,8 @@ static struct inode *cgroup_new_inode(mode_t mode, struct super_block *sb) | |||
571 | 571 | ||
572 | if (inode) { | 572 | if (inode) { |
573 | inode->i_mode = mode; | 573 | inode->i_mode = mode; |
574 | inode->i_uid = current->fsuid; | 574 | inode->i_uid = current_fsuid(); |
575 | inode->i_gid = current->fsgid; | 575 | inode->i_gid = current_fsgid(); |
576 | inode->i_blocks = 0; | 576 | inode->i_blocks = 0; |
577 | inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; | 577 | inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; |
578 | inode->i_mapping->backing_dev_info = &cgroup_backing_dev_info; | 578 | inode->i_mapping->backing_dev_info = &cgroup_backing_dev_info; |
@@ -1280,6 +1280,7 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk) | |||
1280 | static int attach_task_by_pid(struct cgroup *cgrp, u64 pid) | 1280 | static int attach_task_by_pid(struct cgroup *cgrp, u64 pid) |
1281 | { | 1281 | { |
1282 | struct task_struct *tsk; | 1282 | struct task_struct *tsk; |
1283 | const struct cred *cred = current_cred(), *tcred; | ||
1283 | int ret; | 1284 | int ret; |
1284 | 1285 | ||
1285 | if (pid) { | 1286 | if (pid) { |
@@ -1289,14 +1290,16 @@ static int attach_task_by_pid(struct cgroup *cgrp, u64 pid) | |||
1289 | rcu_read_unlock(); | 1290 | rcu_read_unlock(); |
1290 | return -ESRCH; | 1291 | return -ESRCH; |
1291 | } | 1292 | } |
1292 | get_task_struct(tsk); | ||
1293 | rcu_read_unlock(); | ||
1294 | 1293 | ||
1295 | if ((current->euid) && (current->euid != tsk->uid) | 1294 | tcred = __task_cred(tsk); |
1296 | && (current->euid != tsk->suid)) { | 1295 | if (cred->euid && |
1297 | put_task_struct(tsk); | 1296 | cred->euid != tcred->uid && |
1297 | cred->euid != tcred->suid) { | ||
1298 | rcu_read_unlock(); | ||
1298 | return -EACCES; | 1299 | return -EACCES; |
1299 | } | 1300 | } |
1301 | get_task_struct(tsk); | ||
1302 | rcu_read_unlock(); | ||
1300 | } else { | 1303 | } else { |
1301 | tsk = current; | 1304 | tsk = current; |
1302 | get_task_struct(tsk); | 1305 | get_task_struct(tsk); |
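The cgroup change shows the other side of the model: another task's credentials are no longer read through task_struct fields but through __task_cred(), which is only valid under rcu_read_lock(). A hedged sketch of the permission test attach_task_by_pid() now performs (example_may_attach is an illustrative name, not part of the patch):

static int example_may_attach(struct task_struct *tsk)
{
	const struct cred *cred = current_cred(), *tcred;
	int allowed;

	rcu_read_lock();
	tcred = __task_cred(tsk);		/* RCU-protected objective creds */
	allowed = (cred->euid == 0 ||
		   cred->euid == tcred->uid ||
		   cred->euid == tcred->suid);
	rcu_read_unlock();

	return allowed;
}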
diff --git a/kernel/cred-internals.h b/kernel/cred-internals.h
new file mode 100644
index 000000000000..2dc4fc2d0bf1
--- /dev/null
+++ b/kernel/cred-internals.h
@@ -0,0 +1,21 @@ | |||
1 | /* Internal credentials stuff | ||
2 | * | ||
3 | * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public Licence | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the Licence, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | /* | ||
13 | * user.c | ||
14 | */ | ||
15 | static inline void sched_switch_user(struct task_struct *p) | ||
16 | { | ||
17 | #ifdef CONFIG_USER_SCHED | ||
18 | sched_move_task(p); | ||
19 | #endif /* CONFIG_USER_SCHED */ | ||
20 | } | ||
21 | |||
diff --git a/kernel/cred.c b/kernel/cred.c
new file mode 100644
index 000000000000..ff7bc071991c
--- /dev/null
+++ b/kernel/cred.c
@@ -0,0 +1,588 @@ | |||
1 | /* Task credentials management - see Documentation/credentials.txt | ||
2 | * | ||
3 | * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public Licence | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the Licence, or (at your option) any later version. | ||
10 | */ | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/cred.h> | ||
13 | #include <linux/sched.h> | ||
14 | #include <linux/key.h> | ||
15 | #include <linux/keyctl.h> | ||
16 | #include <linux/init_task.h> | ||
17 | #include <linux/security.h> | ||
18 | #include <linux/cn_proc.h> | ||
19 | #include "cred-internals.h" | ||
20 | |||
21 | static struct kmem_cache *cred_jar; | ||
22 | |||
23 | /* | ||
24 | * The common credentials for the initial task's thread group | ||
25 | */ | ||
26 | #ifdef CONFIG_KEYS | ||
27 | static struct thread_group_cred init_tgcred = { | ||
28 | .usage = ATOMIC_INIT(2), | ||
29 | .tgid = 0, | ||
30 | .lock = SPIN_LOCK_UNLOCKED, | ||
31 | }; | ||
32 | #endif | ||
33 | |||
34 | /* | ||
35 | * The initial credentials for the initial task | ||
36 | */ | ||
37 | struct cred init_cred = { | ||
38 | .usage = ATOMIC_INIT(4), | ||
39 | .securebits = SECUREBITS_DEFAULT, | ||
40 | .cap_inheritable = CAP_INIT_INH_SET, | ||
41 | .cap_permitted = CAP_FULL_SET, | ||
42 | .cap_effective = CAP_INIT_EFF_SET, | ||
43 | .cap_bset = CAP_INIT_BSET, | ||
44 | .user = INIT_USER, | ||
45 | .group_info = &init_groups, | ||
46 | #ifdef CONFIG_KEYS | ||
47 | .tgcred = &init_tgcred, | ||
48 | #endif | ||
49 | }; | ||
50 | |||
51 | /* | ||
52 | * Dispose of the shared task group credentials | ||
53 | */ | ||
54 | #ifdef CONFIG_KEYS | ||
55 | static void release_tgcred_rcu(struct rcu_head *rcu) | ||
56 | { | ||
57 | struct thread_group_cred *tgcred = | ||
58 | container_of(rcu, struct thread_group_cred, rcu); | ||
59 | |||
60 | BUG_ON(atomic_read(&tgcred->usage) != 0); | ||
61 | |||
62 | key_put(tgcred->session_keyring); | ||
63 | key_put(tgcred->process_keyring); | ||
64 | kfree(tgcred); | ||
65 | } | ||
66 | #endif | ||
67 | |||
68 | /* | ||
69 | * Release a set of thread group credentials. | ||
70 | */ | ||
71 | static void release_tgcred(struct cred *cred) | ||
72 | { | ||
73 | #ifdef CONFIG_KEYS | ||
74 | struct thread_group_cred *tgcred = cred->tgcred; | ||
75 | |||
76 | if (atomic_dec_and_test(&tgcred->usage)) | ||
77 | call_rcu(&tgcred->rcu, release_tgcred_rcu); | ||
78 | #endif | ||
79 | } | ||
80 | |||
81 | /* | ||
82 | * The RCU callback to actually dispose of a set of credentials | ||
83 | */ | ||
84 | static void put_cred_rcu(struct rcu_head *rcu) | ||
85 | { | ||
86 | struct cred *cred = container_of(rcu, struct cred, rcu); | ||
87 | |||
88 | if (atomic_read(&cred->usage) != 0) | ||
89 | panic("CRED: put_cred_rcu() sees %p with usage %d\n", | ||
90 | cred, atomic_read(&cred->usage)); | ||
91 | |||
92 | security_cred_free(cred); | ||
93 | key_put(cred->thread_keyring); | ||
94 | key_put(cred->request_key_auth); | ||
95 | release_tgcred(cred); | ||
96 | put_group_info(cred->group_info); | ||
97 | free_uid(cred->user); | ||
98 | kmem_cache_free(cred_jar, cred); | ||
99 | } | ||
100 | |||
101 | /** | ||
102 | * __put_cred - Destroy a set of credentials | ||
103 | * @cred: The record to release | ||
104 | * | ||
105 | * Destroy a set of credentials on which no references remain. | ||
106 | */ | ||
107 | void __put_cred(struct cred *cred) | ||
108 | { | ||
109 | BUG_ON(atomic_read(&cred->usage) != 0); | ||
110 | |||
111 | call_rcu(&cred->rcu, put_cred_rcu); | ||
112 | } | ||
113 | EXPORT_SYMBOL(__put_cred); | ||
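/*
 * (Editorial sketch, not part of the patch.)  __put_cred() above is only the
 * slow path: callers are expected to go through put_cred(), which roughly
 * behaves like the helper below (the real inline lives in
 * include/linux/cred.h and may differ) - drop one reference, and only when
 * the count reaches zero hand the record to __put_cred(), which defers the
 * actual free to RCU via put_cred_rcu().
 */
static inline void example_put_cred(struct cred *cred)
{
	if (atomic_dec_and_test(&cred->usage))	/* last reference dropped */
		__put_cred(cred);		/* schedule RCU destruction */
}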
114 | |||
115 | /** | ||
116 | * prepare_creds - Prepare a new set of credentials for modification | ||
117 | * | ||
118 | * Prepare a new set of task credentials for modification. A task's creds | ||
119 | * shouldn't generally be modified directly, therefore this function is used to | ||
120 | * prepare a new copy, which the caller then modifies and then commits by | ||
121 | * calling commit_creds(). | ||
122 | * | ||
123 | * Preparation involves making a copy of the objective creds for modification. | ||
124 | * | ||
125 | * Returns a pointer to the new creds-to-be if successful, NULL otherwise. | ||
126 | * | ||
127 | * Call commit_creds() or abort_creds() to clean up. | ||
128 | */ | ||
129 | struct cred *prepare_creds(void) | ||
130 | { | ||
131 | struct task_struct *task = current; | ||
132 | const struct cred *old; | ||
133 | struct cred *new; | ||
134 | |||
135 | BUG_ON(atomic_read(&task->real_cred->usage) < 1); | ||
136 | |||
137 | new = kmem_cache_alloc(cred_jar, GFP_KERNEL); | ||
138 | if (!new) | ||
139 | return NULL; | ||
140 | |||
141 | old = task->cred; | ||
142 | memcpy(new, old, sizeof(struct cred)); | ||
143 | |||
144 | atomic_set(&new->usage, 1); | ||
145 | get_group_info(new->group_info); | ||
146 | get_uid(new->user); | ||
147 | |||
148 | #ifdef CONFIG_KEYS | ||
149 | key_get(new->thread_keyring); | ||
150 | key_get(new->request_key_auth); | ||
151 | atomic_inc(&new->tgcred->usage); | ||
152 | #endif | ||
153 | |||
154 | #ifdef CONFIG_SECURITY | ||
155 | new->security = NULL; | ||
156 | #endif | ||
157 | |||
158 | if (security_prepare_creds(new, old, GFP_KERNEL) < 0) | ||
159 | goto error; | ||
160 | return new; | ||
161 | |||
162 | error: | ||
163 | abort_creds(new); | ||
164 | return NULL; | ||
165 | } | ||
166 | EXPORT_SYMBOL(prepare_creds); | ||
167 | |||
168 | /* | ||
169 | * Prepare credentials for current to perform an execve() | ||
170 | * - The caller must hold current->cred_exec_mutex | ||
171 | */ | ||
172 | struct cred *prepare_exec_creds(void) | ||
173 | { | ||
174 | struct thread_group_cred *tgcred = NULL; | ||
175 | struct cred *new; | ||
176 | |||
177 | #ifdef CONFIG_KEYS | ||
178 | tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL); | ||
179 | if (!tgcred) | ||
180 | return NULL; | ||
181 | #endif | ||
182 | |||
183 | new = prepare_creds(); | ||
184 | if (!new) { | ||
185 | kfree(tgcred); | ||
186 | return new; | ||
187 | } | ||
188 | |||
189 | #ifdef CONFIG_KEYS | ||
190 | /* newly exec'd tasks don't get a thread keyring */ | ||
191 | key_put(new->thread_keyring); | ||
192 | new->thread_keyring = NULL; | ||
193 | |||
194 | /* create a new per-thread-group creds for all this set of threads to | ||
195 | * share */ | ||
196 | memcpy(tgcred, new->tgcred, sizeof(struct thread_group_cred)); | ||
197 | |||
198 | atomic_set(&tgcred->usage, 1); | ||
199 | spin_lock_init(&tgcred->lock); | ||
200 | |||
201 | /* inherit the session keyring; new process keyring */ | ||
202 | key_get(tgcred->session_keyring); | ||
203 | tgcred->process_keyring = NULL; | ||
204 | |||
205 | release_tgcred(new); | ||
206 | new->tgcred = tgcred; | ||
207 | #endif | ||
208 | |||
209 | return new; | ||
210 | } | ||
211 | |||
212 | /* | ||
213 | * prepare new credentials for the usermode helper dispatcher | ||
214 | */ | ||
215 | struct cred *prepare_usermodehelper_creds(void) | ||
216 | { | ||
217 | #ifdef CONFIG_KEYS | ||
218 | struct thread_group_cred *tgcred = NULL; | ||
219 | #endif | ||
220 | struct cred *new; | ||
221 | |||
222 | #ifdef CONFIG_KEYS | ||
223 | tgcred = kzalloc(sizeof(*new->tgcred), GFP_ATOMIC); | ||
224 | if (!tgcred) | ||
225 | return NULL; | ||
226 | #endif | ||
227 | |||
228 | new = kmem_cache_alloc(cred_jar, GFP_ATOMIC); | ||
229 | if (!new) | ||
230 | return NULL; | ||
231 | |||
232 | memcpy(new, &init_cred, sizeof(struct cred)); | ||
233 | |||
234 | atomic_set(&new->usage, 1); | ||
235 | get_group_info(new->group_info); | ||
236 | get_uid(new->user); | ||
237 | |||
238 | #ifdef CONFIG_KEYS | ||
239 | new->thread_keyring = NULL; | ||
240 | new->request_key_auth = NULL; | ||
241 | new->jit_keyring = KEY_REQKEY_DEFL_DEFAULT; | ||
242 | |||
243 | atomic_set(&tgcred->usage, 1); | ||
244 | spin_lock_init(&tgcred->lock); | ||
245 | new->tgcred = tgcred; | ||
246 | #endif | ||
247 | |||
248 | #ifdef CONFIG_SECURITY | ||
249 | new->security = NULL; | ||
250 | #endif | ||
251 | if (security_prepare_creds(new, &init_cred, GFP_ATOMIC) < 0) | ||
252 | goto error; | ||
253 | |||
254 | BUG_ON(atomic_read(&new->usage) != 1); | ||
255 | return new; | ||
256 | |||
257 | error: | ||
258 | put_cred(new); | ||
259 | return NULL; | ||
260 | } | ||
261 | |||
262 | /* | ||
263 | * Copy credentials for the new process created by fork() | ||
264 | * | ||
265 | * We share if we can, but under some circumstances we have to generate a new | ||
266 | * set. | ||
267 | * | ||
268 | * The new process gets the current process's subjective credentials as its | ||
269 | * objective and subjective credentials | ||
270 | */ | ||
271 | int copy_creds(struct task_struct *p, unsigned long clone_flags) | ||
272 | { | ||
273 | #ifdef CONFIG_KEYS | ||
274 | struct thread_group_cred *tgcred; | ||
275 | #endif | ||
276 | struct cred *new; | ||
277 | int ret; | ||
278 | |||
279 | mutex_init(&p->cred_exec_mutex); | ||
280 | |||
281 | if ( | ||
282 | #ifdef CONFIG_KEYS | ||
283 | !p->cred->thread_keyring && | ||
284 | #endif | ||
285 | clone_flags & CLONE_THREAD | ||
286 | ) { | ||
287 | p->real_cred = get_cred(p->cred); | ||
288 | get_cred(p->cred); | ||
289 | atomic_inc(&p->cred->user->processes); | ||
290 | return 0; | ||
291 | } | ||
292 | |||
293 | new = prepare_creds(); | ||
294 | if (!new) | ||
295 | return -ENOMEM; | ||
296 | |||
297 | if (clone_flags & CLONE_NEWUSER) { | ||
298 | ret = create_user_ns(new); | ||
299 | if (ret < 0) | ||
300 | goto error_put; | ||
301 | } | ||
302 | |||
303 | #ifdef CONFIG_KEYS | ||
304 | /* new threads get their own thread keyrings if their parent already | ||
305 | * had one */ | ||
306 | if (new->thread_keyring) { | ||
307 | key_put(new->thread_keyring); | ||
308 | new->thread_keyring = NULL; | ||
309 | if (clone_flags & CLONE_THREAD) | ||
310 | install_thread_keyring_to_cred(new); | ||
311 | } | ||
312 | |||
313 | /* we share the process and session keyrings between all the threads in | ||
314 | * a process - this is slightly icky as we violate COW credentials a | ||
315 | * bit */ | ||
316 | if (!(clone_flags & CLONE_THREAD)) { | ||
317 | tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL); | ||
318 | if (!tgcred) { | ||
319 | ret = -ENOMEM; | ||
320 | goto error_put; | ||
321 | } | ||
322 | atomic_set(&tgcred->usage, 1); | ||
323 | spin_lock_init(&tgcred->lock); | ||
324 | tgcred->process_keyring = NULL; | ||
325 | tgcred->session_keyring = key_get(new->tgcred->session_keyring); | ||
326 | |||
327 | release_tgcred(new); | ||
328 | new->tgcred = tgcred; | ||
329 | } | ||
330 | #endif | ||
331 | |||
332 | atomic_inc(&new->user->processes); | ||
333 | p->cred = p->real_cred = get_cred(new); | ||
334 | return 0; | ||
335 | |||
336 | error_put: | ||
337 | put_cred(new); | ||
338 | return ret; | ||
339 | } | ||
340 | |||
341 | /** | ||
342 | * commit_creds - Install new credentials upon the current task | ||
343 | * @new: The credentials to be assigned | ||
344 | * | ||
345 | * Install a new set of credentials to the current task, using RCU to replace | ||
346 | * the old set. Both the objective and the subjective credentials pointers are | ||
347 | * updated. This function may not be called if the subjective credentials are | ||
348 | * in an overridden state. | ||
349 | * | ||
350 | * This function eats the caller's reference to the new credentials. | ||
351 | * | ||
352 | * Always returns 0 thus allowing this function to be tail-called at the end | ||
353 | * of, say, sys_setgid(). | ||
354 | */ | ||
355 | int commit_creds(struct cred *new) | ||
356 | { | ||
357 | struct task_struct *task = current; | ||
358 | const struct cred *old; | ||
359 | |||
360 | BUG_ON(task->cred != task->real_cred); | ||
361 | BUG_ON(atomic_read(&task->real_cred->usage) < 2); | ||
362 | BUG_ON(atomic_read(&new->usage) < 1); | ||
363 | |||
364 | old = task->real_cred; | ||
365 | security_commit_creds(new, old); | ||
366 | |||
367 | get_cred(new); /* we will require a ref for the subj creds too */ | ||
368 | |||
369 | /* dumpability changes */ | ||
370 | if (old->euid != new->euid || | ||
371 | old->egid != new->egid || | ||
372 | old->fsuid != new->fsuid || | ||
373 | old->fsgid != new->fsgid || | ||
374 | !cap_issubset(new->cap_permitted, old->cap_permitted)) { | ||
375 | set_dumpable(task->mm, suid_dumpable); | ||
376 | task->pdeath_signal = 0; | ||
377 | smp_wmb(); | ||
378 | } | ||
379 | |||
380 | /* alter the thread keyring */ | ||
381 | if (new->fsuid != old->fsuid) | ||
382 | key_fsuid_changed(task); | ||
383 | if (new->fsgid != old->fsgid) | ||
384 | key_fsgid_changed(task); | ||
385 | |||
386 | /* do it | ||
387 | * - What if a process setreuid()'s and this brings the | ||
388 | * new uid over his NPROC rlimit? We can check this now | ||
389 | * cheaply with the new uid cache, so if it matters | ||
390 | * we should be checking for it. -DaveM | ||
391 | */ | ||
392 | if (new->user != old->user) | ||
393 | atomic_inc(&new->user->processes); | ||
394 | rcu_assign_pointer(task->real_cred, new); | ||
395 | rcu_assign_pointer(task->cred, new); | ||
396 | if (new->user != old->user) | ||
397 | atomic_dec(&old->user->processes); | ||
398 | |||
399 | sched_switch_user(task); | ||
400 | |||
401 | /* send notifications */ | ||
402 | if (new->uid != old->uid || | ||
403 | new->euid != old->euid || | ||
404 | new->suid != old->suid || | ||
405 | new->fsuid != old->fsuid) | ||
406 | proc_id_connector(task, PROC_EVENT_UID); | ||
407 | |||
408 | if (new->gid != old->gid || | ||
409 | new->egid != old->egid || | ||
410 | new->sgid != old->sgid || | ||
411 | new->fsgid != old->fsgid) | ||
412 | proc_id_connector(task, PROC_EVENT_GID); | ||
413 | |||
414 | /* release the old obj and subj refs both */ | ||
415 | put_cred(old); | ||
416 | put_cred(old); | ||
417 | return 0; | ||
418 | } | ||
419 | EXPORT_SYMBOL(commit_creds); | ||
420 | |||
421 | /** | ||
422 | * abort_creds - Discard a set of credentials and unlock the current task | ||
423 | * @new: The credentials that were going to be applied | ||
424 | * | ||
425 | * Discard a set of credentials that were under construction and unlock the | ||
426 | * current task. | ||
427 | */ | ||
428 | void abort_creds(struct cred *new) | ||
429 | { | ||
430 | BUG_ON(atomic_read(&new->usage) < 1); | ||
431 | put_cred(new); | ||
432 | } | ||
433 | EXPORT_SYMBOL(abort_creds); | ||
434 | |||
435 | /** | ||
436 | * override_creds - Override the current process's subjective credentials | ||
437 | * @new: The credentials to be assigned | ||
438 | * | ||
439 | * Install a set of temporary override subjective credentials on the current | ||
440 | * process, returning the old set for later reversion. | ||
441 | */ | ||
442 | const struct cred *override_creds(const struct cred *new) | ||
443 | { | ||
444 | const struct cred *old = current->cred; | ||
445 | |||
446 | rcu_assign_pointer(current->cred, get_cred(new)); | ||
447 | return old; | ||
448 | } | ||
449 | EXPORT_SYMBOL(override_creds); | ||
450 | |||
451 | /** | ||
452 | * revert_creds - Revert a temporary subjective credentials override | ||
453 | * @old: The credentials to be restored | ||
454 | * | ||
455 | * Revert a temporary set of override subjective credentials to an old set, | ||
456 | * discarding the override set. | ||
457 | */ | ||
458 | void revert_creds(const struct cred *old) | ||
459 | { | ||
460 | const struct cred *override = current->cred; | ||
461 | |||
462 | rcu_assign_pointer(current->cred, old); | ||
463 | put_cred(override); | ||
464 | } | ||
465 | EXPORT_SYMBOL(revert_creds); | ||
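A minimal sketch of the override/revert bracket (illustrative only; acting_cred would typically have been built with prepare_kernel_cred(), below):

	static int example_act_as(const struct cred *acting_cred)
	{
		const struct cred *saved;
		int ret;

		saved = override_creds(acting_cred);	/* swap subjective creds; takes a ref on acting_cred */
		ret = 0;				/* ... do the work under acting_cred here ... */
		revert_creds(saved);			/* restore old creds; drops the override ref */
		return ret;
	}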
466 | |||
467 | /* | ||
468 | * initialise the credentials stuff | ||
469 | */ | ||
470 | void __init cred_init(void) | ||
471 | { | ||
472 | /* allocate a slab in which we can store credentials */ | ||
473 | cred_jar = kmem_cache_create("cred_jar", sizeof(struct cred), | ||
474 | 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); | ||
475 | } | ||
476 | |||
477 | /** | ||
478 | * prepare_kernel_cred - Prepare a set of credentials for a kernel service | ||
479 | * @daemon: A userspace daemon to be used as a reference | ||
480 | * | ||
481 | * Prepare a set of credentials for a kernel service. This can then be used to | ||
482 | * override a task's own credentials so that work can be done on behalf of that | ||
483 | * task that requires a different subjective context. | ||
484 | * | ||
485 | * @daemon is used to provide a base for the security record, but can be NULL. | ||
486 | * If @daemon is supplied, then the security data will be derived from that; | ||
487 | * otherwise they'll be set to UID/GID 0, with no groups, full capabilities and no keys. | ||
488 | * | ||
489 | * The caller may change these controls afterwards if desired. | ||
490 | * | ||
491 | * Returns the new credentials or NULL if out of memory. | ||
492 | * | ||
493 | * Does not take, and does not return holding, current->cred_exec_mutex. | ||
494 | */ | ||
495 | struct cred *prepare_kernel_cred(struct task_struct *daemon) | ||
496 | { | ||
497 | const struct cred *old; | ||
498 | struct cred *new; | ||
499 | |||
500 | new = kmem_cache_alloc(cred_jar, GFP_KERNEL); | ||
501 | if (!new) | ||
502 | return NULL; | ||
503 | |||
504 | if (daemon) | ||
505 | old = get_task_cred(daemon); | ||
506 | else | ||
507 | old = get_cred(&init_cred); | ||
508 | |||
509 | get_uid(new->user); | ||
510 | get_group_info(new->group_info); | ||
511 | |||
512 | #ifdef CONFIG_KEYS | ||
513 | atomic_inc(&init_tgcred.usage); | ||
514 | new->tgcred = &init_tgcred; | ||
515 | new->request_key_auth = NULL; | ||
516 | new->thread_keyring = NULL; | ||
517 | new->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING; | ||
518 | #endif | ||
519 | |||
520 | #ifdef CONFIG_SECURITY | ||
521 | new->security = NULL; | ||
522 | #endif | ||
523 | if (security_prepare_creds(new, old, GFP_KERNEL) < 0) | ||
524 | goto error; | ||
525 | |||
526 | atomic_set(&new->usage, 1); | ||
527 | put_cred(old); | ||
528 | return new; | ||
529 | |||
530 | error: | ||
531 | put_cred(new); | ||
532 | return NULL; | ||
533 | } | ||
534 | EXPORT_SYMBOL(prepare_kernel_cred); | ||
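A minimal sketch of how a kernel service might hold such a credential set across its lifetime (illustrative only; cache_cred and the init/exit functions are hypothetical):

	static struct cred *cache_cred;

	static int __init example_service_init(void)
	{
		cache_cred = prepare_kernel_cred(NULL);	/* derived from init_cred */
		if (!cache_cred)
			return -ENOMEM;
		/* the private copy may be adjusted (fsuid, keyrings, ...) before first use */
		return 0;
	}

	static void __exit example_service_exit(void)
	{
		put_cred(cache_cred);			/* drop the service's reference */
	}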
535 | |||
536 | /** | ||
537 | * set_security_override - Set the security ID in a set of credentials | ||
538 | * @new: The credentials to alter | ||
539 | * @secid: The LSM security ID to set | ||
540 | * | ||
541 | * Set the LSM security ID in a set of credentials so that the subjective | ||
542 | * security is overridden when an alternative set of credentials is used. | ||
543 | */ | ||
544 | int set_security_override(struct cred *new, u32 secid) | ||
545 | { | ||
546 | return security_kernel_act_as(new, secid); | ||
547 | } | ||
548 | EXPORT_SYMBOL(set_security_override); | ||
549 | |||
550 | /** | ||
551 | * set_security_override_from_ctx - Set the security ID in a set of credentials | ||
552 | * @new: The credentials to alter | ||
553 | * @secctx: The LSM security context to generate the security ID from. | ||
554 | * | ||
555 | * Set the LSM security ID in a set of credentials so that the subjective | ||
556 | * security is overridden when an alternative set of credentials is used. The | ||
557 | * security ID is specified in string form as a security context to be | ||
558 | * interpreted by the LSM. | ||
559 | */ | ||
560 | int set_security_override_from_ctx(struct cred *new, const char *secctx) | ||
561 | { | ||
562 | u32 secid; | ||
563 | int ret; | ||
564 | |||
565 | ret = security_secctx_to_secid(secctx, strlen(secctx), &secid); | ||
566 | if (ret < 0) | ||
567 | return ret; | ||
568 | |||
569 | return set_security_override(new, secid); | ||
570 | } | ||
571 | EXPORT_SYMBOL(set_security_override_from_ctx); | ||
572 | |||
573 | /** | ||
574 | * set_create_files_as - Set the LSM file create context in a set of credentials | ||
575 | * @new: The credentials to alter | ||
576 | * @inode: The inode to take the context from | ||
577 | * | ||
578 | * Change the LSM file creation context in a set of credentials to be the same | ||
579 | * as the object context of the specified inode, so that the new inodes have | ||
580 | * the same MAC context as that inode. | ||
581 | */ | ||
582 | int set_create_files_as(struct cred *new, struct inode *inode) | ||
583 | { | ||
584 | new->fsuid = inode->i_uid; | ||
585 | new->fsgid = inode->i_gid; | ||
586 | return security_kernel_create_files_as(new, inode); | ||
587 | } | ||
588 | EXPORT_SYMBOL(set_create_files_as); | ||
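A minimal sketch combining prepare_kernel_cred() with set_create_files_as() so that files created by a kernel service inherit the MAC label of an existing directory (illustrative only; the function name and cache_dir parameter are hypothetical):

	static struct cred *example_cache_creds(struct inode *cache_dir)
	{
		struct cred *cred;
		int ret;

		cred = prepare_kernel_cred(NULL);
		if (!cred)
			return ERR_PTR(-ENOMEM);

		ret = set_create_files_as(cred, cache_dir);	/* also copies fsuid/fsgid from the inode */
		if (ret < 0) {
			put_cred(cred);
			return ERR_PTR(ret);
		}
		return cred;			/* use with override_creds(); put_cred() when done */
	}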
diff --git a/kernel/delayacct.c b/kernel/delayacct.c index b3179dad71be..abb6e17505e2 100644 --- a/kernel/delayacct.c +++ b/kernel/delayacct.c | |||
@@ -127,7 +127,7 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk) | |||
127 | */ | 127 | */ |
128 | t1 = tsk->sched_info.pcount; | 128 | t1 = tsk->sched_info.pcount; |
129 | t2 = tsk->sched_info.run_delay; | 129 | t2 = tsk->sched_info.run_delay; |
130 | t3 = tsk->sched_info.cpu_time; | 130 | t3 = tsk->se.sum_exec_runtime; |
131 | 131 | ||
132 | d->cpu_count += t1; | 132 | d->cpu_count += t1; |
133 | 133 | ||
diff --git a/kernel/exit.c b/kernel/exit.c index e5ae36ebe8af..c7422ca92038 100644 --- a/kernel/exit.c +++ b/kernel/exit.c | |||
@@ -46,12 +46,14 @@ | |||
46 | #include <linux/blkdev.h> | 46 | #include <linux/blkdev.h> |
47 | #include <linux/task_io_accounting_ops.h> | 47 | #include <linux/task_io_accounting_ops.h> |
48 | #include <linux/tracehook.h> | 48 | #include <linux/tracehook.h> |
49 | #include <linux/init_task.h> | ||
49 | #include <trace/sched.h> | 50 | #include <trace/sched.h> |
50 | 51 | ||
51 | #include <asm/uaccess.h> | 52 | #include <asm/uaccess.h> |
52 | #include <asm/unistd.h> | 53 | #include <asm/unistd.h> |
53 | #include <asm/pgtable.h> | 54 | #include <asm/pgtable.h> |
54 | #include <asm/mmu_context.h> | 55 | #include <asm/mmu_context.h> |
56 | #include "cred-internals.h" | ||
55 | 57 | ||
56 | DEFINE_TRACE(sched_process_free); | 58 | DEFINE_TRACE(sched_process_free); |
57 | DEFINE_TRACE(sched_process_exit); | 59 | DEFINE_TRACE(sched_process_exit); |
@@ -168,7 +170,10 @@ void release_task(struct task_struct * p) | |||
168 | int zap_leader; | 170 | int zap_leader; |
169 | repeat: | 171 | repeat: |
170 | tracehook_prepare_release_task(p); | 172 | tracehook_prepare_release_task(p); |
171 | atomic_dec(&p->user->processes); | 173 | /* don't need to get the RCU readlock here - the process is dead and |
174 | * can't be modifying its own credentials */ | ||
175 | atomic_dec(&__task_cred(p)->user->processes); | ||
176 | |||
172 | proc_flush_task(p); | 177 | proc_flush_task(p); |
173 | write_lock_irq(&tasklist_lock); | 178 | write_lock_irq(&tasklist_lock); |
174 | tracehook_finish_release_task(p); | 179 | tracehook_finish_release_task(p); |
@@ -343,12 +348,12 @@ static void reparent_to_kthreadd(void) | |||
343 | /* cpus_allowed? */ | 348 | /* cpus_allowed? */ |
344 | /* rt_priority? */ | 349 | /* rt_priority? */ |
345 | /* signals? */ | 350 | /* signals? */ |
346 | security_task_reparent_to_init(current); | ||
347 | memcpy(current->signal->rlim, init_task.signal->rlim, | 351 | memcpy(current->signal->rlim, init_task.signal->rlim, |
348 | sizeof(current->signal->rlim)); | 352 | sizeof(current->signal->rlim)); |
349 | atomic_inc(&(INIT_USER->__count)); | 353 | |
354 | atomic_inc(&init_cred.usage); | ||
355 | commit_creds(&init_cred); | ||
350 | write_unlock_irq(&tasklist_lock); | 356 | write_unlock_irq(&tasklist_lock); |
351 | switch_uid(INIT_USER); | ||
352 | } | 357 | } |
353 | 358 | ||
354 | void __set_special_pids(struct pid *pid) | 359 | void __set_special_pids(struct pid *pid) |
@@ -1082,7 +1087,6 @@ NORET_TYPE void do_exit(long code) | |||
1082 | check_stack_usage(); | 1087 | check_stack_usage(); |
1083 | exit_thread(); | 1088 | exit_thread(); |
1084 | cgroup_exit(tsk, 1); | 1089 | cgroup_exit(tsk, 1); |
1085 | exit_keys(tsk); | ||
1086 | 1090 | ||
1087 | if (group_dead && tsk->signal->leader) | 1091 | if (group_dead && tsk->signal->leader) |
1088 | disassociate_ctty(1); | 1092 | disassociate_ctty(1); |
@@ -1266,12 +1270,12 @@ static int wait_task_zombie(struct task_struct *p, int options, | |||
1266 | unsigned long state; | 1270 | unsigned long state; |
1267 | int retval, status, traced; | 1271 | int retval, status, traced; |
1268 | pid_t pid = task_pid_vnr(p); | 1272 | pid_t pid = task_pid_vnr(p); |
1273 | uid_t uid = __task_cred(p)->uid; | ||
1269 | 1274 | ||
1270 | if (!likely(options & WEXITED)) | 1275 | if (!likely(options & WEXITED)) |
1271 | return 0; | 1276 | return 0; |
1272 | 1277 | ||
1273 | if (unlikely(options & WNOWAIT)) { | 1278 | if (unlikely(options & WNOWAIT)) { |
1274 | uid_t uid = p->uid; | ||
1275 | int exit_code = p->exit_code; | 1279 | int exit_code = p->exit_code; |
1276 | int why, status; | 1280 | int why, status; |
1277 | 1281 | ||
@@ -1392,7 +1396,7 @@ static int wait_task_zombie(struct task_struct *p, int options, | |||
1392 | if (!retval && infop) | 1396 | if (!retval && infop) |
1393 | retval = put_user(pid, &infop->si_pid); | 1397 | retval = put_user(pid, &infop->si_pid); |
1394 | if (!retval && infop) | 1398 | if (!retval && infop) |
1395 | retval = put_user(p->uid, &infop->si_uid); | 1399 | retval = put_user(uid, &infop->si_uid); |
1396 | if (!retval) | 1400 | if (!retval) |
1397 | retval = pid; | 1401 | retval = pid; |
1398 | 1402 | ||
@@ -1457,7 +1461,8 @@ static int wait_task_stopped(int ptrace, struct task_struct *p, | |||
1457 | if (!unlikely(options & WNOWAIT)) | 1461 | if (!unlikely(options & WNOWAIT)) |
1458 | p->exit_code = 0; | 1462 | p->exit_code = 0; |
1459 | 1463 | ||
1460 | uid = p->uid; | 1464 | /* don't need the RCU readlock here as we're holding a spinlock */ |
1465 | uid = __task_cred(p)->uid; | ||
1461 | unlock_sig: | 1466 | unlock_sig: |
1462 | spin_unlock_irq(&p->sighand->siglock); | 1467 | spin_unlock_irq(&p->sighand->siglock); |
1463 | if (!exit_code) | 1468 | if (!exit_code) |
@@ -1531,10 +1536,10 @@ static int wait_task_continued(struct task_struct *p, int options, | |||
1531 | } | 1536 | } |
1532 | if (!unlikely(options & WNOWAIT)) | 1537 | if (!unlikely(options & WNOWAIT)) |
1533 | p->signal->flags &= ~SIGNAL_STOP_CONTINUED; | 1538 | p->signal->flags &= ~SIGNAL_STOP_CONTINUED; |
1539 | uid = __task_cred(p)->uid; | ||
1534 | spin_unlock_irq(&p->sighand->siglock); | 1540 | spin_unlock_irq(&p->sighand->siglock); |
1535 | 1541 | ||
1536 | pid = task_pid_vnr(p); | 1542 | pid = task_pid_vnr(p); |
1537 | uid = p->uid; | ||
1538 | get_task_struct(p); | 1543 | get_task_struct(p); |
1539 | read_unlock(&tasklist_lock); | 1544 | read_unlock(&tasklist_lock); |
1540 | 1545 | ||
diff --git a/kernel/fork.c b/kernel/fork.c index 65ce60adc8e8..6144b36cd897 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
@@ -151,9 +151,8 @@ void __put_task_struct(struct task_struct *tsk) | |||
151 | WARN_ON(atomic_read(&tsk->usage)); | 151 | WARN_ON(atomic_read(&tsk->usage)); |
152 | WARN_ON(tsk == current); | 152 | WARN_ON(tsk == current); |
153 | 153 | ||
154 | security_task_free(tsk); | 154 | put_cred(tsk->real_cred); |
155 | free_uid(tsk->user); | 155 | put_cred(tsk->cred); |
156 | put_group_info(tsk->group_info); | ||
157 | delayacct_tsk_free(tsk); | 156 | delayacct_tsk_free(tsk); |
158 | 157 | ||
159 | if (!profile_handoff_task(tsk)) | 158 | if (!profile_handoff_task(tsk)) |
@@ -822,12 +821,6 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) | |||
822 | if (!sig) | 821 | if (!sig) |
823 | return -ENOMEM; | 822 | return -ENOMEM; |
824 | 823 | ||
825 | ret = copy_thread_group_keys(tsk); | ||
826 | if (ret < 0) { | ||
827 | kmem_cache_free(signal_cachep, sig); | ||
828 | return ret; | ||
829 | } | ||
830 | |||
831 | atomic_set(&sig->count, 1); | 824 | atomic_set(&sig->count, 1); |
832 | atomic_set(&sig->live, 1); | 825 | atomic_set(&sig->live, 1); |
833 | init_waitqueue_head(&sig->wait_chldexit); | 826 | init_waitqueue_head(&sig->wait_chldexit); |
@@ -872,7 +865,6 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) | |||
872 | void __cleanup_signal(struct signal_struct *sig) | 865 | void __cleanup_signal(struct signal_struct *sig) |
873 | { | 866 | { |
874 | thread_group_cputime_free(sig); | 867 | thread_group_cputime_free(sig); |
875 | exit_thread_group_keys(sig); | ||
876 | tty_kref_put(sig->tty); | 868 | tty_kref_put(sig->tty); |
877 | kmem_cache_free(signal_cachep, sig); | 869 | kmem_cache_free(signal_cachep, sig); |
878 | } | 870 | } |
@@ -988,16 +980,16 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
988 | DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled); | 980 | DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled); |
989 | #endif | 981 | #endif |
990 | retval = -EAGAIN; | 982 | retval = -EAGAIN; |
991 | if (atomic_read(&p->user->processes) >= | 983 | if (atomic_read(&p->real_cred->user->processes) >= |
992 | p->signal->rlim[RLIMIT_NPROC].rlim_cur) { | 984 | p->signal->rlim[RLIMIT_NPROC].rlim_cur) { |
993 | if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) && | 985 | if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) && |
994 | p->user != current->nsproxy->user_ns->root_user) | 986 | p->real_cred->user != INIT_USER) |
995 | goto bad_fork_free; | 987 | goto bad_fork_free; |
996 | } | 988 | } |
997 | 989 | ||
998 | atomic_inc(&p->user->__count); | 990 | retval = copy_creds(p, clone_flags); |
999 | atomic_inc(&p->user->processes); | 991 | if (retval < 0) |
1000 | get_group_info(p->group_info); | 992 | goto bad_fork_free; |
1001 | 993 | ||
1002 | /* | 994 | /* |
1003 | * If multiple threads are within copy_process(), then this check | 995 | * If multiple threads are within copy_process(), then this check |
@@ -1052,10 +1044,6 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
1052 | do_posix_clock_monotonic_gettime(&p->start_time); | 1044 | do_posix_clock_monotonic_gettime(&p->start_time); |
1053 | p->real_start_time = p->start_time; | 1045 | p->real_start_time = p->start_time; |
1054 | monotonic_to_bootbased(&p->real_start_time); | 1046 | monotonic_to_bootbased(&p->real_start_time); |
1055 | #ifdef CONFIG_SECURITY | ||
1056 | p->security = NULL; | ||
1057 | #endif | ||
1058 | p->cap_bset = current->cap_bset; | ||
1059 | p->io_context = NULL; | 1047 | p->io_context = NULL; |
1060 | p->audit_context = NULL; | 1048 | p->audit_context = NULL; |
1061 | cgroup_fork(p); | 1049 | cgroup_fork(p); |
@@ -1102,10 +1090,8 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
1102 | /* Perform scheduler related setup. Assign this task to a CPU. */ | 1090 | /* Perform scheduler related setup. Assign this task to a CPU. */ |
1103 | sched_fork(p, clone_flags); | 1091 | sched_fork(p, clone_flags); |
1104 | 1092 | ||
1105 | if ((retval = security_task_alloc(p))) | ||
1106 | goto bad_fork_cleanup_policy; | ||
1107 | if ((retval = audit_alloc(p))) | 1093 | if ((retval = audit_alloc(p))) |
1108 | goto bad_fork_cleanup_security; | 1094 | goto bad_fork_cleanup_policy; |
1109 | /* copy all the process information */ | 1095 | /* copy all the process information */ |
1110 | if ((retval = copy_semundo(clone_flags, p))) | 1096 | if ((retval = copy_semundo(clone_flags, p))) |
1111 | goto bad_fork_cleanup_audit; | 1097 | goto bad_fork_cleanup_audit; |
@@ -1119,10 +1105,8 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
1119 | goto bad_fork_cleanup_sighand; | 1105 | goto bad_fork_cleanup_sighand; |
1120 | if ((retval = copy_mm(clone_flags, p))) | 1106 | if ((retval = copy_mm(clone_flags, p))) |
1121 | goto bad_fork_cleanup_signal; | 1107 | goto bad_fork_cleanup_signal; |
1122 | if ((retval = copy_keys(clone_flags, p))) | ||
1123 | goto bad_fork_cleanup_mm; | ||
1124 | if ((retval = copy_namespaces(clone_flags, p))) | 1108 | if ((retval = copy_namespaces(clone_flags, p))) |
1125 | goto bad_fork_cleanup_keys; | 1109 | goto bad_fork_cleanup_mm; |
1126 | if ((retval = copy_io(clone_flags, p))) | 1110 | if ((retval = copy_io(clone_flags, p))) |
1127 | goto bad_fork_cleanup_namespaces; | 1111 | goto bad_fork_cleanup_namespaces; |
1128 | retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs); | 1112 | retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs); |
@@ -1291,8 +1275,6 @@ bad_fork_cleanup_io: | |||
1291 | put_io_context(p->io_context); | 1275 | put_io_context(p->io_context); |
1292 | bad_fork_cleanup_namespaces: | 1276 | bad_fork_cleanup_namespaces: |
1293 | exit_task_namespaces(p); | 1277 | exit_task_namespaces(p); |
1294 | bad_fork_cleanup_keys: | ||
1295 | exit_keys(p); | ||
1296 | bad_fork_cleanup_mm: | 1278 | bad_fork_cleanup_mm: |
1297 | if (p->mm) | 1279 | if (p->mm) |
1298 | mmput(p->mm); | 1280 | mmput(p->mm); |
@@ -1308,8 +1290,6 @@ bad_fork_cleanup_semundo: | |||
1308 | exit_sem(p); | 1290 | exit_sem(p); |
1309 | bad_fork_cleanup_audit: | 1291 | bad_fork_cleanup_audit: |
1310 | audit_free(p); | 1292 | audit_free(p); |
1311 | bad_fork_cleanup_security: | ||
1312 | security_task_free(p); | ||
1313 | bad_fork_cleanup_policy: | 1293 | bad_fork_cleanup_policy: |
1314 | #ifdef CONFIG_NUMA | 1294 | #ifdef CONFIG_NUMA |
1315 | mpol_put(p->mempolicy); | 1295 | mpol_put(p->mempolicy); |
@@ -1322,9 +1302,9 @@ bad_fork_cleanup_cgroup: | |||
1322 | bad_fork_cleanup_put_domain: | 1302 | bad_fork_cleanup_put_domain: |
1323 | module_put(task_thread_info(p)->exec_domain->module); | 1303 | module_put(task_thread_info(p)->exec_domain->module); |
1324 | bad_fork_cleanup_count: | 1304 | bad_fork_cleanup_count: |
1325 | put_group_info(p->group_info); | 1305 | atomic_dec(&p->cred->user->processes); |
1326 | atomic_dec(&p->user->processes); | 1306 | put_cred(p->real_cred); |
1327 | free_uid(p->user); | 1307 | put_cred(p->cred); |
1328 | bad_fork_free: | 1308 | bad_fork_free: |
1329 | free_task(p); | 1309 | free_task(p); |
1330 | fork_out: | 1310 | fork_out: |
@@ -1368,6 +1348,21 @@ long do_fork(unsigned long clone_flags, | |||
1368 | long nr; | 1348 | long nr; |
1369 | 1349 | ||
1370 | /* | 1350 | /* |
1351 | * Do some preliminary argument and permissions checking before we | ||
1352 | * actually start allocating stuff | ||
1353 | */ | ||
1354 | if (clone_flags & CLONE_NEWUSER) { | ||
1355 | if (clone_flags & CLONE_THREAD) | ||
1356 | return -EINVAL; | ||
1357 | /* hopefully this check will go away when userns support is | ||
1358 | * complete | ||
1359 | */ | ||
1360 | if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) || | ||
1361 | !capable(CAP_SETGID)) | ||
1362 | return -EPERM; | ||
1363 | } | ||
1364 | |||
1365 | /* | ||
1371 | * We hope to recycle these flags after 2.6.26 | 1366 | * We hope to recycle these flags after 2.6.26 |
1372 | */ | 1367 | */ |
1373 | if (unlikely(clone_flags & CLONE_STOPPED)) { | 1368 | if (unlikely(clone_flags & CLONE_STOPPED)) { |
@@ -1615,8 +1610,7 @@ asmlinkage long sys_unshare(unsigned long unshare_flags) | |||
1615 | err = -EINVAL; | 1610 | err = -EINVAL; |
1616 | if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND| | 1611 | if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND| |
1617 | CLONE_VM|CLONE_FILES|CLONE_SYSVSEM| | 1612 | CLONE_VM|CLONE_FILES|CLONE_SYSVSEM| |
1618 | CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWUSER| | 1613 | CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET)) |
1619 | CLONE_NEWNET)) | ||
1620 | goto bad_unshare_out; | 1614 | goto bad_unshare_out; |
1621 | 1615 | ||
1622 | /* | 1616 | /* |
diff --git a/kernel/futex.c b/kernel/futex.c index 8af10027514b..4fe790e89d0f 100644 --- a/kernel/futex.c +++ b/kernel/futex.c | |||
@@ -439,13 +439,20 @@ static void free_pi_state(struct futex_pi_state *pi_state) | |||
439 | static struct task_struct * futex_find_get_task(pid_t pid) | 439 | static struct task_struct * futex_find_get_task(pid_t pid) |
440 | { | 440 | { |
441 | struct task_struct *p; | 441 | struct task_struct *p; |
442 | const struct cred *cred = current_cred(), *pcred; | ||
442 | 443 | ||
443 | rcu_read_lock(); | 444 | rcu_read_lock(); |
444 | p = find_task_by_vpid(pid); | 445 | p = find_task_by_vpid(pid); |
445 | if (!p || ((current->euid != p->euid) && (current->euid != p->uid))) | 446 | if (!p) { |
446 | p = ERR_PTR(-ESRCH); | 447 | p = ERR_PTR(-ESRCH); |
447 | else | 448 | } else { |
448 | get_task_struct(p); | 449 | pcred = __task_cred(p); |
450 | if (cred->euid != pcred->euid && | ||
451 | cred->euid != pcred->uid) | ||
452 | p = ERR_PTR(-ESRCH); | ||
453 | else | ||
454 | get_task_struct(p); | ||
455 | } | ||
449 | 456 | ||
450 | rcu_read_unlock(); | 457 | rcu_read_unlock(); |
451 | 458 | ||
@@ -1829,6 +1836,7 @@ sys_get_robust_list(int pid, struct robust_list_head __user * __user *head_ptr, | |||
1829 | { | 1836 | { |
1830 | struct robust_list_head __user *head; | 1837 | struct robust_list_head __user *head; |
1831 | unsigned long ret; | 1838 | unsigned long ret; |
1839 | const struct cred *cred = current_cred(), *pcred; | ||
1832 | 1840 | ||
1833 | if (!futex_cmpxchg_enabled) | 1841 | if (!futex_cmpxchg_enabled) |
1834 | return -ENOSYS; | 1842 | return -ENOSYS; |
@@ -1844,8 +1852,10 @@ sys_get_robust_list(int pid, struct robust_list_head __user * __user *head_ptr, | |||
1844 | if (!p) | 1852 | if (!p) |
1845 | goto err_unlock; | 1853 | goto err_unlock; |
1846 | ret = -EPERM; | 1854 | ret = -EPERM; |
1847 | if ((current->euid != p->euid) && (current->euid != p->uid) && | 1855 | pcred = __task_cred(p); |
1848 | !capable(CAP_SYS_PTRACE)) | 1856 | if (cred->euid != pcred->euid && |
1857 | cred->euid != pcred->uid && | ||
1858 | !capable(CAP_SYS_PTRACE)) | ||
1849 | goto err_unlock; | 1859 | goto err_unlock; |
1850 | head = p->robust_list; | 1860 | head = p->robust_list; |
1851 | rcu_read_unlock(); | 1861 | rcu_read_unlock(); |
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c index 04ac3a9e42cf..d607a5b9ee29 100644 --- a/kernel/futex_compat.c +++ b/kernel/futex_compat.c | |||
@@ -135,6 +135,7 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr, | |||
135 | { | 135 | { |
136 | struct compat_robust_list_head __user *head; | 136 | struct compat_robust_list_head __user *head; |
137 | unsigned long ret; | 137 | unsigned long ret; |
138 | const struct cred *cred = current_cred(), *pcred; | ||
138 | 139 | ||
139 | if (!futex_cmpxchg_enabled) | 140 | if (!futex_cmpxchg_enabled) |
140 | return -ENOSYS; | 141 | return -ENOSYS; |
@@ -150,8 +151,10 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr, | |||
150 | if (!p) | 151 | if (!p) |
151 | goto err_unlock; | 152 | goto err_unlock; |
152 | ret = -EPERM; | 153 | ret = -EPERM; |
153 | if ((current->euid != p->euid) && (current->euid != p->uid) && | 154 | pcred = __task_cred(p); |
154 | !capable(CAP_SYS_PTRACE)) | 155 | if (cred->euid != pcred->euid && |
156 | cred->euid != pcred->uid && | ||
157 | !capable(CAP_SYS_PTRACE)) | ||
155 | goto err_unlock; | 158 | goto err_unlock; |
156 | head = p->compat_robust_list; | 159 | head = p->compat_robust_list; |
157 | read_unlock(&tasklist_lock); | 160 | read_unlock(&tasklist_lock); |
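The futex conversions above follow the general pattern this patch introduces for examining another task's credentials; a minimal standalone sketch of that pattern (the function name and the exact check are illustrative):

	static int example_euid_matches(struct task_struct *target)
	{
		const struct cred *cred = current_cred(), *tcred;
		int ok;

		rcu_read_lock();
		tcred = __task_cred(target);	/* only valid inside the RCU read-side section */
		ok = (cred->euid == tcred->euid || cred->euid == tcred->uid);
		rcu_read_unlock();

		return ok ? 0 : -EPERM;
	}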
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c index 7b8b0f21a5b1..e694afa0eb8c 100644 --- a/kernel/kallsyms.c +++ b/kernel/kallsyms.c | |||
@@ -30,20 +30,19 @@ | |||
30 | #define all_var 0 | 30 | #define all_var 0 |
31 | #endif | 31 | #endif |
32 | 32 | ||
33 | /* These will be re-linked against their real values during the second link stage */ | 33 | extern const unsigned long kallsyms_addresses[]; |
34 | extern const unsigned long kallsyms_addresses[] __attribute__((weak)); | 34 | extern const u8 kallsyms_names[]; |
35 | extern const u8 kallsyms_names[] __attribute__((weak)); | ||
36 | 35 | ||
37 | /* tell the compiler that the count isn't in the small data section if the arch | 36 | /* tell the compiler that the count isn't in the small data section if the arch |
38 | * has one (eg: FRV) | 37 | * has one (eg: FRV) |
39 | */ | 38 | */ |
40 | extern const unsigned long kallsyms_num_syms | 39 | extern const unsigned long kallsyms_num_syms |
41 | __attribute__((weak, section(".rodata"))); | 40 | __attribute__((__section__(".rodata"))); |
42 | 41 | ||
43 | extern const u8 kallsyms_token_table[] __attribute__((weak)); | 42 | extern const u8 kallsyms_token_table[]; |
44 | extern const u16 kallsyms_token_index[] __attribute__((weak)); | 43 | extern const u16 kallsyms_token_index[]; |
45 | 44 | ||
46 | extern const unsigned long kallsyms_markers[] __attribute__((weak)); | 45 | extern const unsigned long kallsyms_markers[]; |
47 | 46 | ||
48 | static inline int is_kernel_inittext(unsigned long addr) | 47 | static inline int is_kernel_inittext(unsigned long addr) |
49 | { | 48 | { |
@@ -168,9 +167,6 @@ static unsigned long get_symbol_pos(unsigned long addr, | |||
168 | unsigned long symbol_start = 0, symbol_end = 0; | 167 | unsigned long symbol_start = 0, symbol_end = 0; |
169 | unsigned long i, low, high, mid; | 168 | unsigned long i, low, high, mid; |
170 | 169 | ||
171 | /* This kernel should never had been booted. */ | ||
172 | BUG_ON(!kallsyms_addresses); | ||
173 | |||
174 | /* do a binary search on the sorted kallsyms_addresses array */ | 170 | /* do a binary search on the sorted kallsyms_addresses array */ |
175 | low = 0; | 171 | low = 0; |
176 | high = kallsyms_num_syms; | 172 | high = kallsyms_num_syms; |
diff --git a/kernel/kmod.c b/kernel/kmod.c index 3d3c3ea3a023..b46dbb908669 100644 --- a/kernel/kmod.c +++ b/kernel/kmod.c | |||
@@ -118,10 +118,10 @@ EXPORT_SYMBOL(request_module); | |||
118 | struct subprocess_info { | 118 | struct subprocess_info { |
119 | struct work_struct work; | 119 | struct work_struct work; |
120 | struct completion *complete; | 120 | struct completion *complete; |
121 | struct cred *cred; | ||
121 | char *path; | 122 | char *path; |
122 | char **argv; | 123 | char **argv; |
123 | char **envp; | 124 | char **envp; |
124 | struct key *ring; | ||
125 | enum umh_wait wait; | 125 | enum umh_wait wait; |
126 | int retval; | 126 | int retval; |
127 | struct file *stdin; | 127 | struct file *stdin; |
@@ -134,19 +134,20 @@ struct subprocess_info { | |||
134 | static int ____call_usermodehelper(void *data) | 134 | static int ____call_usermodehelper(void *data) |
135 | { | 135 | { |
136 | struct subprocess_info *sub_info = data; | 136 | struct subprocess_info *sub_info = data; |
137 | struct key *new_session, *old_session; | ||
138 | int retval; | 137 | int retval; |
139 | 138 | ||
140 | /* Unblock all signals and set the session keyring. */ | 139 | BUG_ON(atomic_read(&sub_info->cred->usage) != 1); |
141 | new_session = key_get(sub_info->ring); | 140 | |
141 | /* Unblock all signals */ | ||
142 | spin_lock_irq(¤t->sighand->siglock); | 142 | spin_lock_irq(¤t->sighand->siglock); |
143 | old_session = __install_session_keyring(current, new_session); | ||
144 | flush_signal_handlers(current, 1); | 143 | flush_signal_handlers(current, 1); |
145 | sigemptyset(¤t->blocked); | 144 | sigemptyset(¤t->blocked); |
146 | recalc_sigpending(); | 145 | recalc_sigpending(); |
147 | spin_unlock_irq(¤t->sighand->siglock); | 146 | spin_unlock_irq(¤t->sighand->siglock); |
148 | 147 | ||
149 | key_put(old_session); | 148 | /* Install the credentials */ |
149 | commit_creds(sub_info->cred); | ||
150 | sub_info->cred = NULL; | ||
150 | 151 | ||
151 | /* Install input pipe when needed */ | 152 | /* Install input pipe when needed */ |
152 | if (sub_info->stdin) { | 153 | if (sub_info->stdin) { |
@@ -185,6 +186,8 @@ void call_usermodehelper_freeinfo(struct subprocess_info *info) | |||
185 | { | 186 | { |
186 | if (info->cleanup) | 187 | if (info->cleanup) |
187 | (*info->cleanup)(info->argv, info->envp); | 188 | (*info->cleanup)(info->argv, info->envp); |
189 | if (info->cred) | ||
190 | put_cred(info->cred); | ||
188 | kfree(info); | 191 | kfree(info); |
189 | } | 192 | } |
190 | EXPORT_SYMBOL(call_usermodehelper_freeinfo); | 193 | EXPORT_SYMBOL(call_usermodehelper_freeinfo); |
@@ -240,6 +243,8 @@ static void __call_usermodehelper(struct work_struct *work) | |||
240 | pid_t pid; | 243 | pid_t pid; |
241 | enum umh_wait wait = sub_info->wait; | 244 | enum umh_wait wait = sub_info->wait; |
242 | 245 | ||
246 | BUG_ON(atomic_read(&sub_info->cred->usage) != 1); | ||
247 | |||
243 | /* CLONE_VFORK: wait until the usermode helper has execve'd | 248 | /* CLONE_VFORK: wait until the usermode helper has execve'd |
244 | * successfully We need the data structures to stay around | 249 | * successfully We need the data structures to stay around |
245 | * until that is done. */ | 250 | * until that is done. */ |
@@ -362,6 +367,9 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv, | |||
362 | sub_info->path = path; | 367 | sub_info->path = path; |
363 | sub_info->argv = argv; | 368 | sub_info->argv = argv; |
364 | sub_info->envp = envp; | 369 | sub_info->envp = envp; |
370 | sub_info->cred = prepare_usermodehelper_creds(); | ||
371 | if (!sub_info->cred) | ||
372 | return NULL; | ||
365 | 373 | ||
366 | out: | 374 | out: |
367 | return sub_info; | 375 | return sub_info; |
@@ -376,7 +384,13 @@ EXPORT_SYMBOL(call_usermodehelper_setup); | |||
376 | void call_usermodehelper_setkeys(struct subprocess_info *info, | 384 | void call_usermodehelper_setkeys(struct subprocess_info *info, |
377 | struct key *session_keyring) | 385 | struct key *session_keyring) |
378 | { | 386 | { |
379 | info->ring = session_keyring; | 387 | #ifdef CONFIG_KEYS |
388 | struct thread_group_cred *tgcred = info->cred->tgcred; | ||
389 | key_put(tgcred->session_keyring); | ||
390 | tgcred->session_keyring = key_get(session_keyring); | ||
391 | #else | ||
392 | BUG(); | ||
393 | #endif | ||
380 | } | 394 | } |
381 | EXPORT_SYMBOL(call_usermodehelper_setkeys); | 395 | EXPORT_SYMBOL(call_usermodehelper_setkeys); |
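Caller-side usage is unchanged by this patch; the session keyring simply lands in sub_info->cred->tgcred rather than in a separate ->ring field. A minimal sketch (illustrative only; the helper path, environment and the four-argument call_usermodehelper_setup() signature are assumptions, not taken from this hunk):

	static int example_run_helper(struct key *session_keyring)
	{
		char *argv[] = { "/sbin/example-helper", NULL };
		char *envp[] = { "HOME=/", "PATH=/sbin:/bin:/usr/sbin:/usr/bin", NULL };
		struct subprocess_info *info;

		info = call_usermodehelper_setup(argv[0], argv, envp, GFP_KERNEL);
		if (!info)
			return -ENOMEM;

		call_usermodehelper_setkeys(info, session_keyring);
		return call_usermodehelper_exec(info, UMH_WAIT_PROC);
	}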
382 | 396 | ||
@@ -444,6 +458,8 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info, | |||
444 | DECLARE_COMPLETION_ONSTACK(done); | 458 | DECLARE_COMPLETION_ONSTACK(done); |
445 | int retval = 0; | 459 | int retval = 0; |
446 | 460 | ||
461 | BUG_ON(atomic_read(&sub_info->cred->usage) != 1); | ||
462 | |||
447 | helper_lock(); | 463 | helper_lock(); |
448 | if (sub_info->path[0] == '\0') | 464 | if (sub_info->path[0] == '\0') |
449 | goto out; | 465 | goto out; |
diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c index 1d3ef29a2583..63598dca2d0c 100644 --- a/kernel/nsproxy.c +++ b/kernel/nsproxy.c | |||
@@ -80,12 +80,6 @@ static struct nsproxy *create_new_namespaces(unsigned long flags, | |||
80 | goto out_pid; | 80 | goto out_pid; |
81 | } | 81 | } |
82 | 82 | ||
83 | new_nsp->user_ns = copy_user_ns(flags, tsk->nsproxy->user_ns); | ||
84 | if (IS_ERR(new_nsp->user_ns)) { | ||
85 | err = PTR_ERR(new_nsp->user_ns); | ||
86 | goto out_user; | ||
87 | } | ||
88 | |||
89 | new_nsp->net_ns = copy_net_ns(flags, tsk->nsproxy->net_ns); | 83 | new_nsp->net_ns = copy_net_ns(flags, tsk->nsproxy->net_ns); |
90 | if (IS_ERR(new_nsp->net_ns)) { | 84 | if (IS_ERR(new_nsp->net_ns)) { |
91 | err = PTR_ERR(new_nsp->net_ns); | 85 | err = PTR_ERR(new_nsp->net_ns); |
@@ -95,9 +89,6 @@ static struct nsproxy *create_new_namespaces(unsigned long flags, | |||
95 | return new_nsp; | 89 | return new_nsp; |
96 | 90 | ||
97 | out_net: | 91 | out_net: |
98 | if (new_nsp->user_ns) | ||
99 | put_user_ns(new_nsp->user_ns); | ||
100 | out_user: | ||
101 | if (new_nsp->pid_ns) | 92 | if (new_nsp->pid_ns) |
102 | put_pid_ns(new_nsp->pid_ns); | 93 | put_pid_ns(new_nsp->pid_ns); |
103 | out_pid: | 94 | out_pid: |
@@ -130,7 +121,7 @@ int copy_namespaces(unsigned long flags, struct task_struct *tsk) | |||
130 | get_nsproxy(old_ns); | 121 | get_nsproxy(old_ns); |
131 | 122 | ||
132 | if (!(flags & (CLONE_NEWNS | CLONE_NEWUTS | CLONE_NEWIPC | | 123 | if (!(flags & (CLONE_NEWNS | CLONE_NEWUTS | CLONE_NEWIPC | |
133 | CLONE_NEWUSER | CLONE_NEWPID | CLONE_NEWNET))) | 124 | CLONE_NEWPID | CLONE_NEWNET))) |
134 | return 0; | 125 | return 0; |
135 | 126 | ||
136 | if (!capable(CAP_SYS_ADMIN)) { | 127 | if (!capable(CAP_SYS_ADMIN)) { |
@@ -173,8 +164,6 @@ void free_nsproxy(struct nsproxy *ns) | |||
173 | put_ipc_ns(ns->ipc_ns); | 164 | put_ipc_ns(ns->ipc_ns); |
174 | if (ns->pid_ns) | 165 | if (ns->pid_ns) |
175 | put_pid_ns(ns->pid_ns); | 166 | put_pid_ns(ns->pid_ns); |
176 | if (ns->user_ns) | ||
177 | put_user_ns(ns->user_ns); | ||
178 | put_net(ns->net_ns); | 167 | put_net(ns->net_ns); |
179 | kmem_cache_free(nsproxy_cachep, ns); | 168 | kmem_cache_free(nsproxy_cachep, ns); |
180 | } | 169 | } |
@@ -189,7 +178,7 @@ int unshare_nsproxy_namespaces(unsigned long unshare_flags, | |||
189 | int err = 0; | 178 | int err = 0; |
190 | 179 | ||
191 | if (!(unshare_flags & (CLONE_NEWNS | CLONE_NEWUTS | CLONE_NEWIPC | | 180 | if (!(unshare_flags & (CLONE_NEWNS | CLONE_NEWUTS | CLONE_NEWIPC | |
192 | CLONE_NEWUSER | CLONE_NEWNET))) | 181 | CLONE_NEWNET))) |
193 | return 0; | 182 | return 0; |
194 | 183 | ||
195 | if (!capable(CAP_SYS_ADMIN)) | 184 | if (!capable(CAP_SYS_ADMIN)) |
diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 100a71cfdaba..29dc700e198c 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c | |||
@@ -127,6 +127,8 @@ int ptrace_check_attach(struct task_struct *child, int kill) | |||
127 | 127 | ||
128 | int __ptrace_may_access(struct task_struct *task, unsigned int mode) | 128 | int __ptrace_may_access(struct task_struct *task, unsigned int mode) |
129 | { | 129 | { |
130 | const struct cred *cred = current_cred(), *tcred; | ||
131 | |||
130 | /* May we inspect the given task? | 132 | /* May we inspect the given task? |
131 | * This check is used both for attaching with ptrace | 133 | * This check is used both for attaching with ptrace |
132 | * and for allowing access to sensitive information in /proc. | 134 | * and for allowing access to sensitive information in /proc. |
@@ -139,13 +141,19 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode) | |||
139 | /* Don't let security modules deny introspection */ | 141 | /* Don't let security modules deny introspection */ |
140 | if (task == current) | 142 | if (task == current) |
141 | return 0; | 143 | return 0; |
142 | if (((current->uid != task->euid) || | 144 | rcu_read_lock(); |
143 | (current->uid != task->suid) || | 145 | tcred = __task_cred(task); |
144 | (current->uid != task->uid) || | 146 | if ((cred->uid != tcred->euid || |
145 | (current->gid != task->egid) || | 147 | cred->uid != tcred->suid || |
146 | (current->gid != task->sgid) || | 148 | cred->uid != tcred->uid || |
147 | (current->gid != task->gid)) && !capable(CAP_SYS_PTRACE)) | 149 | cred->gid != tcred->egid || |
150 | cred->gid != tcred->sgid || | ||
151 | cred->gid != tcred->gid) && | ||
152 | !capable(CAP_SYS_PTRACE)) { | ||
153 | rcu_read_unlock(); | ||
148 | return -EPERM; | 154 | return -EPERM; |
155 | } | ||
156 | rcu_read_unlock(); | ||
149 | smp_rmb(); | 157 | smp_rmb(); |
150 | if (task->mm) | 158 | if (task->mm) |
151 | dumpable = get_dumpable(task->mm); | 159 | dumpable = get_dumpable(task->mm); |
@@ -175,6 +183,14 @@ int ptrace_attach(struct task_struct *task) | |||
175 | if (same_thread_group(task, current)) | 183 | if (same_thread_group(task, current)) |
176 | goto out; | 184 | goto out; |
177 | 185 | ||
186 | /* Protect exec's credential calculations against our interference; | ||
187 | * SUID, SGID and LSM creds get determined differently under ptrace. | ||
188 | */ | ||
189 | retval = mutex_lock_interruptible(¤t->cred_exec_mutex); | ||
190 | if (retval < 0) | ||
191 | goto out; | ||
192 | |||
193 | retval = -EPERM; | ||
178 | repeat: | 194 | repeat: |
179 | /* | 195 | /* |
180 | * Nasty, nasty. | 196 | * Nasty, nasty. |
@@ -214,6 +230,7 @@ repeat: | |||
214 | bad: | 230 | bad: |
215 | write_unlock_irqrestore(&tasklist_lock, flags); | 231 | write_unlock_irqrestore(&tasklist_lock, flags); |
216 | task_unlock(task); | 232 | task_unlock(task); |
233 | mutex_unlock(¤t->cred_exec_mutex); | ||
217 | out: | 234 | out: |
218 | return retval; | 235 | return retval; |
219 | } | 236 | } |
diff --git a/kernel/sched.c b/kernel/sched.c index dcb39bc88f6c..748ff924a290 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -267,6 +267,10 @@ struct task_group { | |||
267 | struct cgroup_subsys_state css; | 267 | struct cgroup_subsys_state css; |
268 | #endif | 268 | #endif |
269 | 269 | ||
270 | #ifdef CONFIG_USER_SCHED | ||
271 | uid_t uid; | ||
272 | #endif | ||
273 | |||
270 | #ifdef CONFIG_FAIR_GROUP_SCHED | 274 | #ifdef CONFIG_FAIR_GROUP_SCHED |
271 | /* schedulable entities of this group on each cpu */ | 275 | /* schedulable entities of this group on each cpu */ |
272 | struct sched_entity **se; | 276 | struct sched_entity **se; |
@@ -292,6 +296,12 @@ struct task_group { | |||
292 | 296 | ||
293 | #ifdef CONFIG_USER_SCHED | 297 | #ifdef CONFIG_USER_SCHED |
294 | 298 | ||
299 | /* Helper function to pass uid information to create_sched_user() */ | ||
300 | void set_tg_uid(struct user_struct *user) | ||
301 | { | ||
302 | user->tg->uid = user->uid; | ||
303 | } | ||
304 | |||
295 | /* | 305 | /* |
296 | * Root task group. | 306 | * Root task group. |
297 | * Every UID task group (including init_task_group aka UID-0) will | 307 | * Every UID task group (including init_task_group aka UID-0) will |
@@ -351,7 +361,9 @@ static inline struct task_group *task_group(struct task_struct *p) | |||
351 | struct task_group *tg; | 361 | struct task_group *tg; |
352 | 362 | ||
353 | #ifdef CONFIG_USER_SCHED | 363 | #ifdef CONFIG_USER_SCHED |
354 | tg = p->user->tg; | 364 | rcu_read_lock(); |
365 | tg = __task_cred(p)->user->tg; | ||
366 | rcu_read_unlock(); | ||
355 | #elif defined(CONFIG_CGROUP_SCHED) | 367 | #elif defined(CONFIG_CGROUP_SCHED) |
356 | tg = container_of(task_subsys_state(p, cpu_cgroup_subsys_id), | 368 | tg = container_of(task_subsys_state(p, cpu_cgroup_subsys_id), |
357 | struct task_group, css); | 369 | struct task_group, css); |
@@ -592,6 +604,8 @@ struct rq { | |||
592 | #ifdef CONFIG_SCHEDSTATS | 604 | #ifdef CONFIG_SCHEDSTATS |
593 | /* latency stats */ | 605 | /* latency stats */ |
594 | struct sched_info rq_sched_info; | 606 | struct sched_info rq_sched_info; |
607 | unsigned long long rq_cpu_time; | ||
608 | /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */ | ||
595 | 609 | ||
596 | /* sys_sched_yield() stats */ | 610 | /* sys_sched_yield() stats */ |
597 | unsigned int yld_exp_empty; | 611 | unsigned int yld_exp_empty; |
@@ -709,45 +723,18 @@ static __read_mostly char *sched_feat_names[] = { | |||
709 | 723 | ||
710 | #undef SCHED_FEAT | 724 | #undef SCHED_FEAT |
711 | 725 | ||
712 | static int sched_feat_open(struct inode *inode, struct file *filp) | 726 | static int sched_feat_show(struct seq_file *m, void *v) |
713 | { | ||
714 | filp->private_data = inode->i_private; | ||
715 | return 0; | ||
716 | } | ||
717 | |||
718 | static ssize_t | ||
719 | sched_feat_read(struct file *filp, char __user *ubuf, | ||
720 | size_t cnt, loff_t *ppos) | ||
721 | { | 727 | { |
722 | char *buf; | ||
723 | int r = 0; | ||
724 | int len = 0; | ||
725 | int i; | 728 | int i; |
726 | 729 | ||
727 | for (i = 0; sched_feat_names[i]; i++) { | 730 | for (i = 0; sched_feat_names[i]; i++) { |
728 | len += strlen(sched_feat_names[i]); | 731 | if (!(sysctl_sched_features & (1UL << i))) |
729 | len += 4; | 732 | seq_puts(m, "NO_"); |
733 | seq_printf(m, "%s ", sched_feat_names[i]); | ||
730 | } | 734 | } |
735 | seq_puts(m, "\n"); | ||
731 | 736 | ||
732 | buf = kmalloc(len + 2, GFP_KERNEL); | 737 | return 0; |
733 | if (!buf) | ||
734 | return -ENOMEM; | ||
735 | |||
736 | for (i = 0; sched_feat_names[i]; i++) { | ||
737 | if (sysctl_sched_features & (1UL << i)) | ||
738 | r += sprintf(buf + r, "%s ", sched_feat_names[i]); | ||
739 | else | ||
740 | r += sprintf(buf + r, "NO_%s ", sched_feat_names[i]); | ||
741 | } | ||
742 | |||
743 | r += sprintf(buf + r, "\n"); | ||
744 | WARN_ON(r >= len + 2); | ||
745 | |||
746 | r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | ||
747 | |||
748 | kfree(buf); | ||
749 | |||
750 | return r; | ||
751 | } | 738 | } |
752 | 739 | ||
753 | static ssize_t | 740 | static ssize_t |
@@ -792,10 +779,17 @@ sched_feat_write(struct file *filp, const char __user *ubuf, | |||
792 | return cnt; | 779 | return cnt; |
793 | } | 780 | } |
794 | 781 | ||
782 | static int sched_feat_open(struct inode *inode, struct file *filp) | ||
783 | { | ||
784 | return single_open(filp, sched_feat_show, NULL); | ||
785 | } | ||
786 | |||
795 | static struct file_operations sched_feat_fops = { | 787 | static struct file_operations sched_feat_fops = { |
796 | .open = sched_feat_open, | 788 | .open = sched_feat_open, |
797 | .read = sched_feat_read, | 789 | .write = sched_feat_write, |
798 | .write = sched_feat_write, | 790 | .read = seq_read, |
791 | .llseek = seq_lseek, | ||
792 | .release = single_release, | ||
799 | }; | 793 | }; |
800 | 794 | ||
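For reference, the generic shape of the seq_file conversion applied to sched_feat_fops above (a hedged, standalone sketch; the example_* names are hypothetical):

	static int example_show(struct seq_file *m, void *v)
	{
		seq_printf(m, "%d\n", 42);	/* emit state; no hand-rolled buffering or copy_to_user() */
		return 0;
	}

	static int example_open(struct inode *inode, struct file *filp)
	{
		return single_open(filp, example_show, inode->i_private);
	}

	static const struct file_operations example_fops = {
		.open		= example_open,
		.read		= seq_read,
		.llseek		= seq_lseek,
		.release	= single_release,
	};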
801 | static __init int sched_init_debug(void) | 795 | static __init int sched_init_debug(void) |
@@ -1480,27 +1474,13 @@ static void | |||
1480 | update_group_shares_cpu(struct task_group *tg, int cpu, | 1474 | update_group_shares_cpu(struct task_group *tg, int cpu, |
1481 | unsigned long sd_shares, unsigned long sd_rq_weight) | 1475 | unsigned long sd_shares, unsigned long sd_rq_weight) |
1482 | { | 1476 | { |
1483 | int boost = 0; | ||
1484 | unsigned long shares; | 1477 | unsigned long shares; |
1485 | unsigned long rq_weight; | 1478 | unsigned long rq_weight; |
1486 | 1479 | ||
1487 | if (!tg->se[cpu]) | 1480 | if (!tg->se[cpu]) |
1488 | return; | 1481 | return; |
1489 | 1482 | ||
1490 | rq_weight = tg->cfs_rq[cpu]->load.weight; | 1483 | rq_weight = tg->cfs_rq[cpu]->rq_weight; |
1491 | |||
1492 | /* | ||
1493 | * If there are currently no tasks on the cpu pretend there is one of | ||
1494 | * average load so that when a new task gets to run here it will not | ||
1495 | * get delayed by group starvation. | ||
1496 | */ | ||
1497 | if (!rq_weight) { | ||
1498 | boost = 1; | ||
1499 | rq_weight = NICE_0_LOAD; | ||
1500 | } | ||
1501 | |||
1502 | if (unlikely(rq_weight > sd_rq_weight)) | ||
1503 | rq_weight = sd_rq_weight; | ||
1504 | 1484 | ||
1505 | /* | 1485 | /* |
1506 | * \Sum shares * rq_weight | 1486 | * \Sum shares * rq_weight |
@@ -1508,7 +1488,7 @@ update_group_shares_cpu(struct task_group *tg, int cpu, | |||
1508 | * \Sum rq_weight | 1488 | * \Sum rq_weight |
1509 | * | 1489 | * |
1510 | */ | 1490 | */ |
1511 | shares = (sd_shares * rq_weight) / (sd_rq_weight + 1); | 1491 | shares = (sd_shares * rq_weight) / sd_rq_weight; |
1512 | shares = clamp_t(unsigned long, shares, MIN_SHARES, MAX_SHARES); | 1492 | shares = clamp_t(unsigned long, shares, MIN_SHARES, MAX_SHARES); |
1513 | 1493 | ||
1514 | if (abs(shares - tg->se[cpu]->load.weight) > | 1494 | if (abs(shares - tg->se[cpu]->load.weight) > |
@@ -1517,11 +1497,7 @@ update_group_shares_cpu(struct task_group *tg, int cpu, | |||
1517 | unsigned long flags; | 1497 | unsigned long flags; |
1518 | 1498 | ||
1519 | spin_lock_irqsave(&rq->lock, flags); | 1499 | spin_lock_irqsave(&rq->lock, flags); |
1520 | /* | 1500 | tg->cfs_rq[cpu]->shares = shares; |
1521 | * record the actual number of shares, not the boosted amount. | ||
1522 | */ | ||
1523 | tg->cfs_rq[cpu]->shares = boost ? 0 : shares; | ||
1524 | tg->cfs_rq[cpu]->rq_weight = rq_weight; | ||
1525 | 1501 | ||
1526 | __set_se_shares(tg->se[cpu], shares); | 1502 | __set_se_shares(tg->se[cpu], shares); |
1527 | spin_unlock_irqrestore(&rq->lock, flags); | 1503 | spin_unlock_irqrestore(&rq->lock, flags); |
@@ -1535,13 +1511,23 @@ update_group_shares_cpu(struct task_group *tg, int cpu, | |||
1535 | */ | 1511 | */ |
1536 | static int tg_shares_up(struct task_group *tg, void *data) | 1512 | static int tg_shares_up(struct task_group *tg, void *data) |
1537 | { | 1513 | { |
1538 | unsigned long rq_weight = 0; | 1514 | unsigned long weight, rq_weight = 0; |
1539 | unsigned long shares = 0; | 1515 | unsigned long shares = 0; |
1540 | struct sched_domain *sd = data; | 1516 | struct sched_domain *sd = data; |
1541 | int i; | 1517 | int i; |
1542 | 1518 | ||
1543 | for_each_cpu_mask(i, sd->span) { | 1519 | for_each_cpu_mask(i, sd->span) { |
1544 | rq_weight += tg->cfs_rq[i]->load.weight; | 1520 | /* |
1521 | * If there are currently no tasks on the cpu pretend there | ||
1522 | * is one of average load so that when a new task gets to | ||
1523 | * run here it will not get delayed by group starvation. | ||
1524 | */ | ||
1525 | weight = tg->cfs_rq[i]->load.weight; | ||
1526 | if (!weight) | ||
1527 | weight = NICE_0_LOAD; | ||
1528 | |||
1529 | tg->cfs_rq[i]->rq_weight = weight; | ||
1530 | rq_weight += weight; | ||
1545 | shares += tg->cfs_rq[i]->shares; | 1531 | shares += tg->cfs_rq[i]->shares; |
1546 | } | 1532 | } |
1547 | 1533 | ||
@@ -1551,9 +1537,6 @@ static int tg_shares_up(struct task_group *tg, void *data) | |||
1551 | if (!sd->parent || !(sd->parent->flags & SD_LOAD_BALANCE)) | 1537 | if (!sd->parent || !(sd->parent->flags & SD_LOAD_BALANCE)) |
1552 | shares = tg->shares; | 1538 | shares = tg->shares; |
1553 | 1539 | ||
1554 | if (!rq_weight) | ||
1555 | rq_weight = cpus_weight(sd->span) * NICE_0_LOAD; | ||
1556 | |||
1557 | for_each_cpu_mask(i, sd->span) | 1540 | for_each_cpu_mask(i, sd->span) |
1558 | update_group_shares_cpu(tg, i, shares, rq_weight); | 1541 | update_group_shares_cpu(tg, i, shares, rq_weight); |
1559 | 1542 | ||
@@ -1618,6 +1601,39 @@ static inline void update_shares_locked(struct rq *rq, struct sched_domain *sd) | |||
1618 | 1601 | ||
1619 | #endif | 1602 | #endif |
1620 | 1603 | ||
1604 | /* | ||
1605 | * double_lock_balance - lock the busiest runqueue, this_rq is locked already. | ||
1606 | */ | ||
1607 | static int double_lock_balance(struct rq *this_rq, struct rq *busiest) | ||
1608 | __releases(this_rq->lock) | ||
1609 | __acquires(busiest->lock) | ||
1610 | __acquires(this_rq->lock) | ||
1611 | { | ||
1612 | int ret = 0; | ||
1613 | |||
1614 | if (unlikely(!irqs_disabled())) { | ||
1615 | /* printk() doesn't work good under rq->lock */ | ||
1616 | spin_unlock(&this_rq->lock); | ||
1617 | BUG_ON(1); | ||
1618 | } | ||
1619 | if (unlikely(!spin_trylock(&busiest->lock))) { | ||
1620 | if (busiest < this_rq) { | ||
1621 | spin_unlock(&this_rq->lock); | ||
1622 | spin_lock(&busiest->lock); | ||
1623 | spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING); | ||
1624 | ret = 1; | ||
1625 | } else | ||
1626 | spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING); | ||
1627 | } | ||
1628 | return ret; | ||
1629 | } | ||
1630 | |||
1631 | static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) | ||
1632 | __releases(busiest->lock) | ||
1633 | { | ||
1634 | spin_unlock(&busiest->lock); | ||
1635 | lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_); | ||
1636 | } | ||
1621 | #endif | 1637 | #endif |
1622 | 1638 | ||
1623 | #ifdef CONFIG_FAIR_GROUP_SCHED | 1639 | #ifdef CONFIG_FAIR_GROUP_SCHED |
@@ -2262,6 +2278,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync) | |||
2262 | 2278 | ||
2263 | smp_wmb(); | 2279 | smp_wmb(); |
2264 | rq = task_rq_lock(p, &flags); | 2280 | rq = task_rq_lock(p, &flags); |
2281 | update_rq_clock(rq); | ||
2265 | old_state = p->state; | 2282 | old_state = p->state; |
2266 | if (!(old_state & state)) | 2283 | if (!(old_state & state)) |
2267 | goto out; | 2284 | goto out; |
@@ -2319,7 +2336,6 @@ out_activate: | |||
2319 | schedstat_inc(p, se.nr_wakeups_local); | 2336 | schedstat_inc(p, se.nr_wakeups_local); |
2320 | else | 2337 | else |
2321 | schedstat_inc(p, se.nr_wakeups_remote); | 2338 | schedstat_inc(p, se.nr_wakeups_remote); |
2322 | update_rq_clock(rq); | ||
2323 | activate_task(rq, p, 1); | 2339 | activate_task(rq, p, 1); |
2324 | success = 1; | 2340 | success = 1; |
2325 | 2341 | ||
@@ -2820,40 +2836,6 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2) | |||
2820 | } | 2836 | } |
2821 | 2837 | ||
2822 | /* | 2838 | /* |
2823 | * double_lock_balance - lock the busiest runqueue, this_rq is locked already. | ||
2824 | */ | ||
2825 | static int double_lock_balance(struct rq *this_rq, struct rq *busiest) | ||
2826 | __releases(this_rq->lock) | ||
2827 | __acquires(busiest->lock) | ||
2828 | __acquires(this_rq->lock) | ||
2829 | { | ||
2830 | int ret = 0; | ||
2831 | |||
2832 | if (unlikely(!irqs_disabled())) { | ||
2833 | /* printk() doesn't work good under rq->lock */ | ||
2834 | spin_unlock(&this_rq->lock); | ||
2835 | BUG_ON(1); | ||
2836 | } | ||
2837 | if (unlikely(!spin_trylock(&busiest->lock))) { | ||
2838 | if (busiest < this_rq) { | ||
2839 | spin_unlock(&this_rq->lock); | ||
2840 | spin_lock(&busiest->lock); | ||
2841 | spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING); | ||
2842 | ret = 1; | ||
2843 | } else | ||
2844 | spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING); | ||
2845 | } | ||
2846 | return ret; | ||
2847 | } | ||
2848 | |||
2849 | static void double_unlock_balance(struct rq *this_rq, struct rq *busiest) | ||
2850 | __releases(busiest->lock) | ||
2851 | { | ||
2852 | spin_unlock(&busiest->lock); | ||
2853 | lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_); | ||
2854 | } | ||
2855 | |||
2856 | /* | ||
2857 | * If dest_cpu is allowed for this process, migrate the task to it. | 2839 | * If dest_cpu is allowed for this process, migrate the task to it. |
2858 | * This is accomplished by forcing the cpu_allowed mask to only | 2840 | * This is accomplished by forcing the cpu_allowed mask to only |
2859 | * allow dest_cpu, which will force the cpu onto dest_cpu. Then | 2841 | * allow dest_cpu, which will force the cpu onto dest_cpu. Then |
@@ -3714,7 +3696,7 @@ out_balanced: | |||
3714 | static void idle_balance(int this_cpu, struct rq *this_rq) | 3696 | static void idle_balance(int this_cpu, struct rq *this_rq) |
3715 | { | 3697 | { |
3716 | struct sched_domain *sd; | 3698 | struct sched_domain *sd; |
3717 | int pulled_task = -1; | 3699 | int pulled_task = 0; |
3718 | unsigned long next_balance = jiffies + HZ; | 3700 | unsigned long next_balance = jiffies + HZ; |
3719 | cpumask_t tmpmask; | 3701 | cpumask_t tmpmask; |
3720 | 3702 | ||
@@ -5141,6 +5123,22 @@ __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio) | |||
5141 | set_load_weight(p); | 5123 | set_load_weight(p); |
5142 | } | 5124 | } |
5143 | 5125 | ||
5126 | /* | ||
5127 | * check that the target process has a UID that matches the current process's | ||
5128 | */ | ||
5129 | static bool check_same_owner(struct task_struct *p) | ||
5130 | { | ||
5131 | const struct cred *cred = current_cred(), *pcred; | ||
5132 | bool match; | ||
5133 | |||
5134 | rcu_read_lock(); | ||
5135 | pcred = __task_cred(p); | ||
5136 | match = (cred->euid == pcred->euid || | ||
5137 | cred->euid == pcred->uid); | ||
5138 | rcu_read_unlock(); | ||
5139 | return match; | ||
5140 | } | ||
5141 | |||
5144 | static int __sched_setscheduler(struct task_struct *p, int policy, | 5142 | static int __sched_setscheduler(struct task_struct *p, int policy, |
5145 | struct sched_param *param, bool user) | 5143 | struct sched_param *param, bool user) |
5146 | { | 5144 | { |
@@ -5200,8 +5198,7 @@ recheck: | |||
5200 | return -EPERM; | 5198 | return -EPERM; |
5201 | 5199 | ||
5202 | /* can't change other user's priorities */ | 5200 | /* can't change other user's priorities */ |
5203 | if ((current->euid != p->euid) && | 5201 | if (!check_same_owner(p)) |
5204 | (current->euid != p->uid)) | ||
5205 | return -EPERM; | 5202 | return -EPERM; |
5206 | } | 5203 | } |
5207 | 5204 | ||
@@ -5433,8 +5430,7 @@ long sched_setaffinity(pid_t pid, const cpumask_t *in_mask) | |||
5433 | read_unlock(&tasklist_lock); | 5430 | read_unlock(&tasklist_lock); |
5434 | 5431 | ||
5435 | retval = -EPERM; | 5432 | retval = -EPERM; |
5436 | if ((current->euid != p->euid) && (current->euid != p->uid) && | 5433 | if (!check_same_owner(p) && !capable(CAP_SYS_NICE)) |
5437 | !capable(CAP_SYS_NICE)) | ||
5438 | goto out_unlock; | 5434 | goto out_unlock; |
5439 | 5435 | ||
5440 | retval = security_task_setscheduler(p, 0, NULL); | 5436 | retval = security_task_setscheduler(p, 0, NULL); |
@@ -6134,7 +6130,6 @@ static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu) | |||
6134 | 6130 | ||
6135 | /* | 6131 | /* |
6136 | * Figure out where task on dead CPU should go, use force if necessary. | 6132 | * Figure out where task on dead CPU should go, use force if necessary. |
6137 | * NOTE: interrupts should be disabled by the caller | ||
6138 | */ | 6133 | */ |
6139 | static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) | 6134 | static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) |
6140 | { | 6135 | { |
@@ -6646,28 +6641,6 @@ early_initcall(migration_init); | |||
6646 | 6641 | ||
6647 | #ifdef CONFIG_SCHED_DEBUG | 6642 | #ifdef CONFIG_SCHED_DEBUG |
6648 | 6643 | ||
6649 | static inline const char *sd_level_to_string(enum sched_domain_level lvl) | ||
6650 | { | ||
6651 | switch (lvl) { | ||
6652 | case SD_LV_NONE: | ||
6653 | return "NONE"; | ||
6654 | case SD_LV_SIBLING: | ||
6655 | return "SIBLING"; | ||
6656 | case SD_LV_MC: | ||
6657 | return "MC"; | ||
6658 | case SD_LV_CPU: | ||
6659 | return "CPU"; | ||
6660 | case SD_LV_NODE: | ||
6661 | return "NODE"; | ||
6662 | case SD_LV_ALLNODES: | ||
6663 | return "ALLNODES"; | ||
6664 | case SD_LV_MAX: | ||
6665 | return "MAX"; | ||
6666 | |||
6667 | } | ||
6668 | return "MAX"; | ||
6669 | } | ||
6670 | |||
6671 | static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, | 6644 | static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, |
6672 | cpumask_t *groupmask) | 6645 | cpumask_t *groupmask) |
6673 | { | 6646 | { |
@@ -6687,8 +6660,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, | |||
6687 | return -1; | 6660 | return -1; |
6688 | } | 6661 | } |
6689 | 6662 | ||
6690 | printk(KERN_CONT "span %s level %s\n", | 6663 | printk(KERN_CONT "span %s level %s\n", str, sd->name); |
6691 | str, sd_level_to_string(sd->level)); | ||
6692 | 6664 | ||
6693 | if (!cpu_isset(cpu, sd->span)) { | 6665 | if (!cpu_isset(cpu, sd->span)) { |
6694 | printk(KERN_ERR "ERROR: domain->span does not contain " | 6666 | printk(KERN_ERR "ERROR: domain->span does not contain " |
@@ -6824,6 +6796,8 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) | |||
6824 | SD_BALANCE_EXEC | | 6796 | SD_BALANCE_EXEC | |
6825 | SD_SHARE_CPUPOWER | | 6797 | SD_SHARE_CPUPOWER | |
6826 | SD_SHARE_PKG_RESOURCES); | 6798 | SD_SHARE_PKG_RESOURCES); |
6799 | if (nr_node_ids == 1) | ||
6800 | pflags &= ~SD_SERIALIZE; | ||
6827 | } | 6801 | } |
6828 | if (~cflags & pflags) | 6802 | if (~cflags & pflags) |
6829 | return 0; | 6803 | return 0; |
@@ -7344,13 +7318,21 @@ struct allmasks { | |||
7344 | }; | 7318 | }; |
7345 | 7319 | ||
7346 | #if NR_CPUS > 128 | 7320 | #if NR_CPUS > 128 |
7347 | #define SCHED_CPUMASK_ALLOC 1 | 7321 | #define SCHED_CPUMASK_DECLARE(v) struct allmasks *v |
7348 | #define SCHED_CPUMASK_FREE(v) kfree(v) | 7322 | static inline void sched_cpumask_alloc(struct allmasks **masks) |
7349 | #define SCHED_CPUMASK_DECLARE(v) struct allmasks *v | 7323 | { |
7324 | *masks = kmalloc(sizeof(**masks), GFP_KERNEL); | ||
7325 | } | ||
7326 | static inline void sched_cpumask_free(struct allmasks *masks) | ||
7327 | { | ||
7328 | kfree(masks); | ||
7329 | } | ||
7350 | #else | 7330 | #else |
7351 | #define SCHED_CPUMASK_ALLOC 0 | 7331 | #define SCHED_CPUMASK_DECLARE(v) struct allmasks _v, *v = &_v |
7352 | #define SCHED_CPUMASK_FREE(v) | 7332 | static inline void sched_cpumask_alloc(struct allmasks **masks) |
7353 | #define SCHED_CPUMASK_DECLARE(v) struct allmasks _v, *v = &_v | 7333 | { } |
7334 | static inline void sched_cpumask_free(struct allmasks *masks) | ||
7335 | { } | ||
7354 | #endif | 7336 | #endif |
7355 | 7337 | ||
7356 | #define SCHED_CPUMASK_VAR(v, a) cpumask_t *v = (cpumask_t *) \ | 7338 | #define SCHED_CPUMASK_VAR(v, a) cpumask_t *v = (cpumask_t *) \ |
@@ -7426,9 +7408,8 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7426 | return -ENOMEM; | 7408 | return -ENOMEM; |
7427 | } | 7409 | } |
7428 | 7410 | ||
7429 | #if SCHED_CPUMASK_ALLOC | ||
7430 | /* get space for all scratch cpumask variables */ | 7411 | /* get space for all scratch cpumask variables */ |
7431 | allmasks = kmalloc(sizeof(*allmasks), GFP_KERNEL); | 7412 | sched_cpumask_alloc(&allmasks); |
7432 | if (!allmasks) { | 7413 | if (!allmasks) { |
7433 | printk(KERN_WARNING "Cannot alloc cpumask array\n"); | 7414 | printk(KERN_WARNING "Cannot alloc cpumask array\n"); |
7434 | kfree(rd); | 7415 | kfree(rd); |
@@ -7437,7 +7418,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7437 | #endif | 7418 | #endif |
7438 | return -ENOMEM; | 7419 | return -ENOMEM; |
7439 | } | 7420 | } |
7440 | #endif | 7421 | |
7441 | tmpmask = (cpumask_t *)allmasks; | 7422 | tmpmask = (cpumask_t *)allmasks; |
7442 | 7423 | ||
7443 | 7424 | ||
@@ -7691,13 +7672,13 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7691 | cpu_attach_domain(sd, rd, i); | 7672 | cpu_attach_domain(sd, rd, i); |
7692 | } | 7673 | } |
7693 | 7674 | ||
7694 | SCHED_CPUMASK_FREE((void *)allmasks); | 7675 | sched_cpumask_free(allmasks); |
7695 | return 0; | 7676 | return 0; |
7696 | 7677 | ||
7697 | #ifdef CONFIG_NUMA | 7678 | #ifdef CONFIG_NUMA |
7698 | error: | 7679 | error: |
7699 | free_sched_groups(cpu_map, tmpmask); | 7680 | free_sched_groups(cpu_map, tmpmask); |
7700 | SCHED_CPUMASK_FREE((void *)allmasks); | 7681 | sched_cpumask_free(allmasks); |
7701 | kfree(rd); | 7682 | kfree(rd); |
7702 | return -ENOMEM; | 7683 | return -ENOMEM; |
7703 | #endif | 7684 | #endif |
@@ -7720,8 +7701,14 @@ static struct sched_domain_attr *dattr_cur; | |||
7720 | */ | 7701 | */ |
7721 | static cpumask_t fallback_doms; | 7702 | static cpumask_t fallback_doms; |
7722 | 7703 | ||
7723 | void __attribute__((weak)) arch_update_cpu_topology(void) | 7704 | /* |
7705 | * arch_update_cpu_topology lets virtualized architectures update the | ||
7706 | * cpu core maps. It is supposed to return 1 if the topology changed | ||
7707 | * or 0 if it stayed the same. | ||
7708 | */ | ||
7709 | int __attribute__((weak)) arch_update_cpu_topology(void) | ||
7724 | { | 7710 | { |
7711 | return 0; | ||
7725 | } | 7712 | } |
7726 | 7713 | ||
7727 | /* | 7714 | /* |
@@ -7761,8 +7748,6 @@ static void detach_destroy_domains(const cpumask_t *cpu_map) | |||
7761 | cpumask_t tmpmask; | 7748 | cpumask_t tmpmask; |
7762 | int i; | 7749 | int i; |
7763 | 7750 | ||
7764 | unregister_sched_domain_sysctl(); | ||
7765 | |||
7766 | for_each_cpu_mask_nr(i, *cpu_map) | 7751 | for_each_cpu_mask_nr(i, *cpu_map) |
7767 | cpu_attach_domain(NULL, &def_root_domain, i); | 7752 | cpu_attach_domain(NULL, &def_root_domain, i); |
7768 | synchronize_sched(); | 7753 | synchronize_sched(); |
@@ -7815,17 +7800,21 @@ void partition_sched_domains(int ndoms_new, cpumask_t *doms_new, | |||
7815 | struct sched_domain_attr *dattr_new) | 7800 | struct sched_domain_attr *dattr_new) |
7816 | { | 7801 | { |
7817 | int i, j, n; | 7802 | int i, j, n; |
7803 | int new_topology; | ||
7818 | 7804 | ||
7819 | mutex_lock(&sched_domains_mutex); | 7805 | mutex_lock(&sched_domains_mutex); |
7820 | 7806 | ||
7821 | /* always unregister in case we don't destroy any domains */ | 7807 | /* always unregister in case we don't destroy any domains */ |
7822 | unregister_sched_domain_sysctl(); | 7808 | unregister_sched_domain_sysctl(); |
7823 | 7809 | ||
7810 | /* Let architecture update cpu core mappings. */ | ||
7811 | new_topology = arch_update_cpu_topology(); | ||
7812 | |||
7824 | n = doms_new ? ndoms_new : 0; | 7813 | n = doms_new ? ndoms_new : 0; |
7825 | 7814 | ||
7826 | /* Destroy deleted domains */ | 7815 | /* Destroy deleted domains */ |
7827 | for (i = 0; i < ndoms_cur; i++) { | 7816 | for (i = 0; i < ndoms_cur; i++) { |
7828 | for (j = 0; j < n; j++) { | 7817 | for (j = 0; j < n && !new_topology; j++) { |
7829 | if (cpus_equal(doms_cur[i], doms_new[j]) | 7818 | if (cpus_equal(doms_cur[i], doms_new[j]) |
7830 | && dattrs_equal(dattr_cur, i, dattr_new, j)) | 7819 | && dattrs_equal(dattr_cur, i, dattr_new, j)) |
7831 | goto match1; | 7820 | goto match1; |
@@ -7840,12 +7829,12 @@ match1: | |||
7840 | ndoms_cur = 0; | 7829 | ndoms_cur = 0; |
7841 | doms_new = &fallback_doms; | 7830 | doms_new = &fallback_doms; |
7842 | cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map); | 7831 | cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map); |
7843 | dattr_new = NULL; | 7832 | WARN_ON_ONCE(dattr_new); |
7844 | } | 7833 | } |
7845 | 7834 | ||
7846 | /* Build new domains */ | 7835 | /* Build new domains */ |
7847 | for (i = 0; i < ndoms_new; i++) { | 7836 | for (i = 0; i < ndoms_new; i++) { |
7848 | for (j = 0; j < ndoms_cur; j++) { | 7837 | for (j = 0; j < ndoms_cur && !new_topology; j++) { |
7849 | if (cpus_equal(doms_new[i], doms_cur[j]) | 7838 | if (cpus_equal(doms_new[i], doms_cur[j]) |
7850 | && dattrs_equal(dattr_new, i, dattr_cur, j)) | 7839 | && dattrs_equal(dattr_new, i, dattr_cur, j)) |
7851 | goto match2; | 7840 | goto match2; |
@@ -8500,7 +8489,7 @@ static | |||
8500 | int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) | 8489 | int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) |
8501 | { | 8490 | { |
8502 | struct cfs_rq *cfs_rq; | 8491 | struct cfs_rq *cfs_rq; |
8503 | struct sched_entity *se, *parent_se; | 8492 | struct sched_entity *se; |
8504 | struct rq *rq; | 8493 | struct rq *rq; |
8505 | int i; | 8494 | int i; |
8506 | 8495 | ||
@@ -8516,18 +8505,17 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) | |||
8516 | for_each_possible_cpu(i) { | 8505 | for_each_possible_cpu(i) { |
8517 | rq = cpu_rq(i); | 8506 | rq = cpu_rq(i); |
8518 | 8507 | ||
8519 | cfs_rq = kmalloc_node(sizeof(struct cfs_rq), | 8508 | cfs_rq = kzalloc_node(sizeof(struct cfs_rq), |
8520 | GFP_KERNEL|__GFP_ZERO, cpu_to_node(i)); | 8509 | GFP_KERNEL, cpu_to_node(i)); |
8521 | if (!cfs_rq) | 8510 | if (!cfs_rq) |
8522 | goto err; | 8511 | goto err; |
8523 | 8512 | ||
8524 | se = kmalloc_node(sizeof(struct sched_entity), | 8513 | se = kzalloc_node(sizeof(struct sched_entity), |
8525 | GFP_KERNEL|__GFP_ZERO, cpu_to_node(i)); | 8514 | GFP_KERNEL, cpu_to_node(i)); |
8526 | if (!se) | 8515 | if (!se) |
8527 | goto err; | 8516 | goto err; |
8528 | 8517 | ||
8529 | parent_se = parent ? parent->se[i] : NULL; | 8518 | init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent->se[i]); |
8530 | init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent_se); | ||
8531 | } | 8519 | } |
8532 | 8520 | ||
8533 | return 1; | 8521 | return 1; |
@@ -8588,7 +8576,7 @@ static | |||
8588 | int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) | 8576 | int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) |
8589 | { | 8577 | { |
8590 | struct rt_rq *rt_rq; | 8578 | struct rt_rq *rt_rq; |
8591 | struct sched_rt_entity *rt_se, *parent_se; | 8579 | struct sched_rt_entity *rt_se; |
8592 | struct rq *rq; | 8580 | struct rq *rq; |
8593 | int i; | 8581 | int i; |
8594 | 8582 | ||
@@ -8605,18 +8593,17 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) | |||
8605 | for_each_possible_cpu(i) { | 8593 | for_each_possible_cpu(i) { |
8606 | rq = cpu_rq(i); | 8594 | rq = cpu_rq(i); |
8607 | 8595 | ||
8608 | rt_rq = kmalloc_node(sizeof(struct rt_rq), | 8596 | rt_rq = kzalloc_node(sizeof(struct rt_rq), |
8609 | GFP_KERNEL|__GFP_ZERO, cpu_to_node(i)); | 8597 | GFP_KERNEL, cpu_to_node(i)); |
8610 | if (!rt_rq) | 8598 | if (!rt_rq) |
8611 | goto err; | 8599 | goto err; |
8612 | 8600 | ||
8613 | rt_se = kmalloc_node(sizeof(struct sched_rt_entity), | 8601 | rt_se = kzalloc_node(sizeof(struct sched_rt_entity), |
8614 | GFP_KERNEL|__GFP_ZERO, cpu_to_node(i)); | 8602 | GFP_KERNEL, cpu_to_node(i)); |
8615 | if (!rt_se) | 8603 | if (!rt_se) |
8616 | goto err; | 8604 | goto err; |
8617 | 8605 | ||
8618 | parent_se = parent ? parent->rt_se[i] : NULL; | 8606 | init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent->rt_se[i]); |
8619 | init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent_se); | ||
8620 | } | 8607 | } |
8621 | 8608 | ||
8622 | return 1; | 8609 | return 1; |
@@ -9259,11 +9246,12 @@ struct cgroup_subsys cpu_cgroup_subsys = { | |||
9259 | * (balbir@in.ibm.com). | 9246 | * (balbir@in.ibm.com). |
9260 | */ | 9247 | */ |
9261 | 9248 | ||
9262 | /* track cpu usage of a group of tasks */ | 9249 | /* track cpu usage of a group of tasks and its child groups */ |
9263 | struct cpuacct { | 9250 | struct cpuacct { |
9264 | struct cgroup_subsys_state css; | 9251 | struct cgroup_subsys_state css; |
9265 | /* cpuusage holds pointer to a u64-type object on every cpu */ | 9252 | /* cpuusage holds pointer to a u64-type object on every cpu */ |
9266 | u64 *cpuusage; | 9253 | u64 *cpuusage; |
9254 | struct cpuacct *parent; | ||
9267 | }; | 9255 | }; |
9268 | 9256 | ||
9269 | struct cgroup_subsys cpuacct_subsys; | 9257 | struct cgroup_subsys cpuacct_subsys; |
@@ -9297,6 +9285,9 @@ static struct cgroup_subsys_state *cpuacct_create( | |||
9297 | return ERR_PTR(-ENOMEM); | 9285 | return ERR_PTR(-ENOMEM); |
9298 | } | 9286 | } |
9299 | 9287 | ||
9288 | if (cgrp->parent) | ||
9289 | ca->parent = cgroup_ca(cgrp->parent); | ||
9290 | |||
9300 | return &ca->css; | 9291 | return &ca->css; |
9301 | } | 9292 | } |
9302 | 9293 | ||
@@ -9310,6 +9301,41 @@ cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp) | |||
9310 | kfree(ca); | 9301 | kfree(ca); |
9311 | } | 9302 | } |
9312 | 9303 | ||
9304 | static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu) | ||
9305 | { | ||
9306 | u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu); | ||
9307 | u64 data; | ||
9308 | |||
9309 | #ifndef CONFIG_64BIT | ||
9310 | /* | ||
9311 | * Take rq->lock to make 64-bit read safe on 32-bit platforms. | ||
9312 | */ | ||
9313 | spin_lock_irq(&cpu_rq(cpu)->lock); | ||
9314 | data = *cpuusage; | ||
9315 | spin_unlock_irq(&cpu_rq(cpu)->lock); | ||
9316 | #else | ||
9317 | data = *cpuusage; | ||
9318 | #endif | ||
9319 | |||
9320 | return data; | ||
9321 | } | ||
9322 | |||
9323 | static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val) | ||
9324 | { | ||
9325 | u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu); | ||
9326 | |||
9327 | #ifndef CONFIG_64BIT | ||
9328 | /* | ||
9329 | * Take rq->lock to make 64-bit write safe on 32-bit platforms. | ||
9330 | */ | ||
9331 | spin_lock_irq(&cpu_rq(cpu)->lock); | ||
9332 | *cpuusage = val; | ||
9333 | spin_unlock_irq(&cpu_rq(cpu)->lock); | ||
9334 | #else | ||
9335 | *cpuusage = val; | ||
9336 | #endif | ||
9337 | } | ||
9338 | |||
9313 | /* return total cpu usage (in nanoseconds) of a group */ | 9339 | /* return total cpu usage (in nanoseconds) of a group */ |
9314 | static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft) | 9340 | static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft) |
9315 | { | 9341 | { |
@@ -9317,17 +9343,8 @@ static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft) | |||
9317 | u64 totalcpuusage = 0; | 9343 | u64 totalcpuusage = 0; |
9318 | int i; | 9344 | int i; |
9319 | 9345 | ||
9320 | for_each_possible_cpu(i) { | 9346 | for_each_present_cpu(i) |
9321 | u64 *cpuusage = percpu_ptr(ca->cpuusage, i); | 9347 | totalcpuusage += cpuacct_cpuusage_read(ca, i); |
9322 | |||
9323 | /* | ||
9324 | * Take rq->lock to make 64-bit addition safe on 32-bit | ||
9325 | * platforms. | ||
9326 | */ | ||
9327 | spin_lock_irq(&cpu_rq(i)->lock); | ||
9328 | totalcpuusage += *cpuusage; | ||
9329 | spin_unlock_irq(&cpu_rq(i)->lock); | ||
9330 | } | ||
9331 | 9348 | ||
9332 | return totalcpuusage; | 9349 | return totalcpuusage; |
9333 | } | 9350 | } |
@@ -9344,23 +9361,39 @@ static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype, | |||
9344 | goto out; | 9361 | goto out; |
9345 | } | 9362 | } |
9346 | 9363 | ||
9347 | for_each_possible_cpu(i) { | 9364 | for_each_present_cpu(i) |
9348 | u64 *cpuusage = percpu_ptr(ca->cpuusage, i); | 9365 | cpuacct_cpuusage_write(ca, i, 0); |
9349 | 9366 | ||
9350 | spin_lock_irq(&cpu_rq(i)->lock); | ||
9351 | *cpuusage = 0; | ||
9352 | spin_unlock_irq(&cpu_rq(i)->lock); | ||
9353 | } | ||
9354 | out: | 9367 | out: |
9355 | return err; | 9368 | return err; |
9356 | } | 9369 | } |
9357 | 9370 | ||
9371 | static int cpuacct_percpu_seq_read(struct cgroup *cgroup, struct cftype *cft, | ||
9372 | struct seq_file *m) | ||
9373 | { | ||
9374 | struct cpuacct *ca = cgroup_ca(cgroup); | ||
9375 | u64 percpu; | ||
9376 | int i; | ||
9377 | |||
9378 | for_each_present_cpu(i) { | ||
9379 | percpu = cpuacct_cpuusage_read(ca, i); | ||
9380 | seq_printf(m, "%llu ", (unsigned long long) percpu); | ||
9381 | } | ||
9382 | seq_printf(m, "\n"); | ||
9383 | return 0; | ||
9384 | } | ||
9385 | |||
9358 | static struct cftype files[] = { | 9386 | static struct cftype files[] = { |
9359 | { | 9387 | { |
9360 | .name = "usage", | 9388 | .name = "usage", |
9361 | .read_u64 = cpuusage_read, | 9389 | .read_u64 = cpuusage_read, |
9362 | .write_u64 = cpuusage_write, | 9390 | .write_u64 = cpuusage_write, |
9363 | }, | 9391 | }, |
9392 | { | ||
9393 | .name = "usage_percpu", | ||
9394 | .read_seq_string = cpuacct_percpu_seq_read, | ||
9395 | }, | ||
9396 | |||
9364 | }; | 9397 | }; |
9365 | 9398 | ||
9366 | static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp) | 9399 | static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp) |
@@ -9376,14 +9409,16 @@ static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp) | |||
9376 | static void cpuacct_charge(struct task_struct *tsk, u64 cputime) | 9409 | static void cpuacct_charge(struct task_struct *tsk, u64 cputime) |
9377 | { | 9410 | { |
9378 | struct cpuacct *ca; | 9411 | struct cpuacct *ca; |
9412 | int cpu; | ||
9379 | 9413 | ||
9380 | if (!cpuacct_subsys.active) | 9414 | if (!cpuacct_subsys.active) |
9381 | return; | 9415 | return; |
9382 | 9416 | ||
9417 | cpu = task_cpu(tsk); | ||
9383 | ca = task_ca(tsk); | 9418 | ca = task_ca(tsk); |
9384 | if (ca) { | ||
9385 | u64 *cpuusage = percpu_ptr(ca->cpuusage, task_cpu(tsk)); | ||
9386 | 9419 | ||
9420 | for (; ca; ca = ca->parent) { | ||
9421 | u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu); | ||
9387 | *cpuusage += cputime; | 9422 | *cpuusage += cputime; |
9388 | } | 9423 | } |
9389 | } | 9424 | } |
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c index 26ed8e3d1c15..4293cfa9681d 100644 --- a/kernel/sched_debug.c +++ b/kernel/sched_debug.c | |||
@@ -53,6 +53,40 @@ static unsigned long nsec_low(unsigned long long nsec) | |||
53 | 53 | ||
54 | #define SPLIT_NS(x) nsec_high(x), nsec_low(x) | 54 | #define SPLIT_NS(x) nsec_high(x), nsec_low(x) |
55 | 55 | ||
56 | #ifdef CONFIG_FAIR_GROUP_SCHED | ||
57 | static void print_cfs_group_stats(struct seq_file *m, int cpu, | ||
58 | struct task_group *tg) | ||
59 | { | ||
60 | struct sched_entity *se = tg->se[cpu]; | ||
61 | if (!se) | ||
62 | return; | ||
63 | |||
64 | #define P(F) \ | ||
65 | SEQ_printf(m, " .%-30s: %lld\n", #F, (long long)F) | ||
66 | #define PN(F) \ | ||
67 | SEQ_printf(m, " .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F)) | ||
68 | |||
69 | PN(se->exec_start); | ||
70 | PN(se->vruntime); | ||
71 | PN(se->sum_exec_runtime); | ||
72 | #ifdef CONFIG_SCHEDSTATS | ||
73 | PN(se->wait_start); | ||
74 | PN(se->sleep_start); | ||
75 | PN(se->block_start); | ||
76 | PN(se->sleep_max); | ||
77 | PN(se->block_max); | ||
78 | PN(se->exec_max); | ||
79 | PN(se->slice_max); | ||
80 | PN(se->wait_max); | ||
81 | PN(se->wait_sum); | ||
82 | P(se->wait_count); | ||
83 | #endif | ||
84 | P(se->load.weight); | ||
85 | #undef PN | ||
86 | #undef P | ||
87 | } | ||
88 | #endif | ||
89 | |||
56 | static void | 90 | static void |
57 | print_task(struct seq_file *m, struct rq *rq, struct task_struct *p) | 91 | print_task(struct seq_file *m, struct rq *rq, struct task_struct *p) |
58 | { | 92 | { |
@@ -121,20 +155,19 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) | |||
121 | 155 | ||
122 | #if defined(CONFIG_CGROUP_SCHED) && defined(CONFIG_FAIR_GROUP_SCHED) | 156 | #if defined(CONFIG_CGROUP_SCHED) && defined(CONFIG_FAIR_GROUP_SCHED) |
123 | char path[128] = ""; | 157 | char path[128] = ""; |
124 | struct cgroup *cgroup = NULL; | ||
125 | struct task_group *tg = cfs_rq->tg; | 158 | struct task_group *tg = cfs_rq->tg; |
126 | 159 | ||
127 | if (tg) | 160 | cgroup_path(tg->css.cgroup, path, sizeof(path)); |
128 | cgroup = tg->css.cgroup; | ||
129 | |||
130 | if (cgroup) | ||
131 | cgroup_path(cgroup, path, sizeof(path)); | ||
132 | 161 | ||
133 | SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, path); | 162 | SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, path); |
163 | #elif defined(CONFIG_USER_SCHED) && defined(CONFIG_FAIR_GROUP_SCHED) | ||
164 | { | ||
165 | uid_t uid = cfs_rq->tg->uid; | ||
166 | SEQ_printf(m, "\ncfs_rq[%d] for UID: %u\n", cpu, uid); | ||
167 | } | ||
134 | #else | 168 | #else |
135 | SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu); | 169 | SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu); |
136 | #endif | 170 | #endif |
137 | |||
138 | SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "exec_clock", | 171 | SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "exec_clock", |
139 | SPLIT_NS(cfs_rq->exec_clock)); | 172 | SPLIT_NS(cfs_rq->exec_clock)); |
140 | 173 | ||
@@ -168,6 +201,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) | |||
168 | #ifdef CONFIG_SMP | 201 | #ifdef CONFIG_SMP |
169 | SEQ_printf(m, " .%-30s: %lu\n", "shares", cfs_rq->shares); | 202 | SEQ_printf(m, " .%-30s: %lu\n", "shares", cfs_rq->shares); |
170 | #endif | 203 | #endif |
204 | print_cfs_group_stats(m, cpu, cfs_rq->tg); | ||
171 | #endif | 205 | #endif |
172 | } | 206 | } |
173 | 207 | ||
@@ -175,14 +209,9 @@ void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq) | |||
175 | { | 209 | { |
176 | #if defined(CONFIG_CGROUP_SCHED) && defined(CONFIG_RT_GROUP_SCHED) | 210 | #if defined(CONFIG_CGROUP_SCHED) && defined(CONFIG_RT_GROUP_SCHED) |
177 | char path[128] = ""; | 211 | char path[128] = ""; |
178 | struct cgroup *cgroup = NULL; | ||
179 | struct task_group *tg = rt_rq->tg; | 212 | struct task_group *tg = rt_rq->tg; |
180 | 213 | ||
181 | if (tg) | 214 | cgroup_path(tg->css.cgroup, path, sizeof(path)); |
182 | cgroup = tg->css.cgroup; | ||
183 | |||
184 | if (cgroup) | ||
185 | cgroup_path(cgroup, path, sizeof(path)); | ||
186 | 215 | ||
187 | SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, path); | 216 | SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, path); |
188 | #else | 217 | #else |
@@ -272,7 +301,7 @@ static int sched_debug_show(struct seq_file *m, void *v) | |||
272 | u64 now = ktime_to_ns(ktime_get()); | 301 | u64 now = ktime_to_ns(ktime_get()); |
273 | int cpu; | 302 | int cpu; |
274 | 303 | ||
275 | SEQ_printf(m, "Sched Debug Version: v0.07, %s %.*s\n", | 304 | SEQ_printf(m, "Sched Debug Version: v0.08, %s %.*s\n", |
276 | init_utsname()->release, | 305 | init_utsname()->release, |
277 | (int)strcspn(init_utsname()->version, " "), | 306 | (int)strcspn(init_utsname()->version, " "), |
278 | init_utsname()->version); | 307 | init_utsname()->version); |
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 98345e45b059..5ad4440f0fc4 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c | |||
@@ -492,6 +492,8 @@ static void update_curr(struct cfs_rq *cfs_rq) | |||
492 | * overflow on 32 bits): | 492 | * overflow on 32 bits): |
493 | */ | 493 | */ |
494 | delta_exec = (unsigned long)(now - curr->exec_start); | 494 | delta_exec = (unsigned long)(now - curr->exec_start); |
495 | if (!delta_exec) | ||
496 | return; | ||
495 | 497 | ||
496 | __update_curr(cfs_rq, curr, delta_exec); | 498 | __update_curr(cfs_rq, curr, delta_exec); |
497 | curr->exec_start = now; | 499 | curr->exec_start = now; |
@@ -1345,12 +1347,11 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync) | |||
1345 | { | 1347 | { |
1346 | struct task_struct *curr = rq->curr; | 1348 | struct task_struct *curr = rq->curr; |
1347 | struct sched_entity *se = &curr->se, *pse = &p->se; | 1349 | struct sched_entity *se = &curr->se, *pse = &p->se; |
1350 | struct cfs_rq *cfs_rq = task_cfs_rq(curr); | ||
1348 | 1351 | ||
1349 | if (unlikely(rt_prio(p->prio))) { | 1352 | update_curr(cfs_rq); |
1350 | struct cfs_rq *cfs_rq = task_cfs_rq(curr); | ||
1351 | 1353 | ||
1352 | update_rq_clock(rq); | 1354 | if (unlikely(rt_prio(p->prio))) { |
1353 | update_curr(cfs_rq); | ||
1354 | resched_task(curr); | 1355 | resched_task(curr); |
1355 | return; | 1356 | return; |
1356 | } | 1357 | } |
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index d9ba9d5f99d6..51d2af3e6191 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c | |||
@@ -77,7 +77,7 @@ static inline u64 sched_rt_period(struct rt_rq *rt_rq) | |||
77 | } | 77 | } |
78 | 78 | ||
79 | #define for_each_leaf_rt_rq(rt_rq, rq) \ | 79 | #define for_each_leaf_rt_rq(rt_rq, rq) \ |
80 | list_for_each_entry(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list) | 80 | list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list) |
81 | 81 | ||
82 | static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) | 82 | static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) |
83 | { | 83 | { |
@@ -537,13 +537,13 @@ static void update_curr_rt(struct rq *rq) | |||
537 | for_each_sched_rt_entity(rt_se) { | 537 | for_each_sched_rt_entity(rt_se) { |
538 | rt_rq = rt_rq_of_se(rt_se); | 538 | rt_rq = rt_rq_of_se(rt_se); |
539 | 539 | ||
540 | spin_lock(&rt_rq->rt_runtime_lock); | ||
541 | if (sched_rt_runtime(rt_rq) != RUNTIME_INF) { | 540 | if (sched_rt_runtime(rt_rq) != RUNTIME_INF) { |
541 | spin_lock(&rt_rq->rt_runtime_lock); | ||
542 | rt_rq->rt_time += delta_exec; | 542 | rt_rq->rt_time += delta_exec; |
543 | if (sched_rt_runtime_exceeded(rt_rq)) | 543 | if (sched_rt_runtime_exceeded(rt_rq)) |
544 | resched_task(curr); | 544 | resched_task(curr); |
545 | spin_unlock(&rt_rq->rt_runtime_lock); | ||
545 | } | 546 | } |
546 | spin_unlock(&rt_rq->rt_runtime_lock); | ||
547 | } | 547 | } |
548 | } | 548 | } |
549 | 549 | ||
@@ -909,9 +909,6 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p) | |||
909 | /* Only try algorithms three times */ | 909 | /* Only try algorithms three times */ |
910 | #define RT_MAX_TRIES 3 | 910 | #define RT_MAX_TRIES 3 |
911 | 911 | ||
912 | static int double_lock_balance(struct rq *this_rq, struct rq *busiest); | ||
913 | static void double_unlock_balance(struct rq *this_rq, struct rq *busiest); | ||
914 | |||
915 | static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep); | 912 | static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep); |
916 | 913 | ||
917 | static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu) | 914 | static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu) |
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h index 7dbf72a2b02c..3b01098164c8 100644 --- a/kernel/sched_stats.h +++ b/kernel/sched_stats.h | |||
@@ -31,7 +31,7 @@ static int show_schedstat(struct seq_file *seq, void *v) | |||
31 | rq->yld_act_empty, rq->yld_exp_empty, rq->yld_count, | 31 | rq->yld_act_empty, rq->yld_exp_empty, rq->yld_count, |
32 | rq->sched_switch, rq->sched_count, rq->sched_goidle, | 32 | rq->sched_switch, rq->sched_count, rq->sched_goidle, |
33 | rq->ttwu_count, rq->ttwu_local, | 33 | rq->ttwu_count, rq->ttwu_local, |
34 | rq->rq_sched_info.cpu_time, | 34 | rq->rq_cpu_time, |
35 | rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount); | 35 | rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount); |
36 | 36 | ||
37 | seq_printf(seq, "\n"); | 37 | seq_printf(seq, "\n"); |
@@ -123,7 +123,7 @@ static inline void | |||
123 | rq_sched_info_depart(struct rq *rq, unsigned long long delta) | 123 | rq_sched_info_depart(struct rq *rq, unsigned long long delta) |
124 | { | 124 | { |
125 | if (rq) | 125 | if (rq) |
126 | rq->rq_sched_info.cpu_time += delta; | 126 | rq->rq_cpu_time += delta; |
127 | } | 127 | } |
128 | 128 | ||
129 | static inline void | 129 | static inline void |
@@ -236,7 +236,6 @@ static inline void sched_info_depart(struct task_struct *t) | |||
236 | unsigned long long delta = task_rq(t)->clock - | 236 | unsigned long long delta = task_rq(t)->clock - |
237 | t->sched_info.last_arrival; | 237 | t->sched_info.last_arrival; |
238 | 238 | ||
239 | t->sched_info.cpu_time += delta; | ||
240 | rq_sched_info_depart(task_rq(t), delta); | 239 | rq_sched_info_depart(task_rq(t), delta); |
241 | 240 | ||
242 | if (t->state == TASK_RUNNING) | 241 | if (t->state == TASK_RUNNING) |
diff --git a/kernel/signal.c b/kernel/signal.c index e9afe63da24b..8e95855ff3cf 100644 --- a/kernel/signal.c +++ b/kernel/signal.c | |||
@@ -179,6 +179,11 @@ int next_signal(struct sigpending *pending, sigset_t *mask) | |||
179 | return sig; | 179 | return sig; |
180 | } | 180 | } |
181 | 181 | ||
182 | /* | ||
183 | * allocate a new signal queue record | ||
184 | * - this may be called without locks if and only if t == current, otherwise an | ||
185 | * appropriate lock must be held to stop the target task from exiting | ||
186 | */ | ||
182 | static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags, | 187 | static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags, |
183 | int override_rlimit) | 188 | int override_rlimit) |
184 | { | 189 | { |
@@ -186,11 +191,12 @@ static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags, | |||
186 | struct user_struct *user; | 191 | struct user_struct *user; |
187 | 192 | ||
188 | /* | 193 | /* |
189 | * In order to avoid problems with "switch_user()", we want to make | 194 | * We won't get problems with the target's UID changing under us |
190 | * sure that the compiler doesn't re-load "t->user" | 195 | * because changing it requires RCU be used, and if t != current, the |
196 | * caller must be holding the RCU readlock (by way of a spinlock) and | ||
197 | * we use RCU protection here | ||
191 | */ | 198 | */ |
192 | user = t->user; | 199 | user = get_uid(__task_cred(t)->user); |
193 | barrier(); | ||
194 | atomic_inc(&user->sigpending); | 200 | atomic_inc(&user->sigpending); |
195 | if (override_rlimit || | 201 | if (override_rlimit || |
196 | atomic_read(&user->sigpending) <= | 202 | atomic_read(&user->sigpending) <= |
@@ -198,12 +204,14 @@ static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags, | |||
198 | q = kmem_cache_alloc(sigqueue_cachep, flags); | 204 | q = kmem_cache_alloc(sigqueue_cachep, flags); |
199 | if (unlikely(q == NULL)) { | 205 | if (unlikely(q == NULL)) { |
200 | atomic_dec(&user->sigpending); | 206 | atomic_dec(&user->sigpending); |
207 | free_uid(user); | ||
201 | } else { | 208 | } else { |
202 | INIT_LIST_HEAD(&q->list); | 209 | INIT_LIST_HEAD(&q->list); |
203 | q->flags = 0; | 210 | q->flags = 0; |
204 | q->user = get_uid(user); | 211 | q->user = user; |
205 | } | 212 | } |
206 | return(q); | 213 | |
214 | return q; | ||
207 | } | 215 | } |
208 | 216 | ||
209 | static void __sigqueue_free(struct sigqueue *q) | 217 | static void __sigqueue_free(struct sigqueue *q) |
@@ -564,10 +572,12 @@ static int rm_from_queue(unsigned long mask, struct sigpending *s) | |||
564 | 572 | ||
565 | /* | 573 | /* |
566 | * Bad permissions for sending the signal | 574 | * Bad permissions for sending the signal |
575 | * - the caller must hold at least the RCU read lock | ||
567 | */ | 576 | */ |
568 | static int check_kill_permission(int sig, struct siginfo *info, | 577 | static int check_kill_permission(int sig, struct siginfo *info, |
569 | struct task_struct *t) | 578 | struct task_struct *t) |
570 | { | 579 | { |
580 | const struct cred *cred = current_cred(), *tcred; | ||
571 | struct pid *sid; | 581 | struct pid *sid; |
572 | int error; | 582 | int error; |
573 | 583 | ||
@@ -581,8 +591,11 @@ static int check_kill_permission(int sig, struct siginfo *info, | |||
581 | if (error) | 591 | if (error) |
582 | return error; | 592 | return error; |
583 | 593 | ||
584 | if ((current->euid ^ t->suid) && (current->euid ^ t->uid) && | 594 | tcred = __task_cred(t); |
585 | (current->uid ^ t->suid) && (current->uid ^ t->uid) && | 595 | if ((cred->euid ^ tcred->suid) && |
596 | (cred->euid ^ tcred->uid) && | ||
597 | (cred->uid ^ tcred->suid) && | ||
598 | (cred->uid ^ tcred->uid) && | ||
586 | !capable(CAP_KILL)) { | 599 | !capable(CAP_KILL)) { |
587 | switch (sig) { | 600 | switch (sig) { |
588 | case SIGCONT: | 601 | case SIGCONT: |
@@ -846,7 +859,7 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t, | |||
846 | q->info.si_errno = 0; | 859 | q->info.si_errno = 0; |
847 | q->info.si_code = SI_USER; | 860 | q->info.si_code = SI_USER; |
848 | q->info.si_pid = task_pid_vnr(current); | 861 | q->info.si_pid = task_pid_vnr(current); |
849 | q->info.si_uid = current->uid; | 862 | q->info.si_uid = current_uid(); |
850 | break; | 863 | break; |
851 | case (unsigned long) SEND_SIG_PRIV: | 864 | case (unsigned long) SEND_SIG_PRIV: |
852 | q->info.si_signo = sig; | 865 | q->info.si_signo = sig; |
@@ -1010,6 +1023,10 @@ struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long | |||
1010 | return sighand; | 1023 | return sighand; |
1011 | } | 1024 | } |
1012 | 1025 | ||
1026 | /* | ||
1027 | * send signal info to all the members of a group | ||
1028 | * - the caller must hold at least the RCU read lock | ||
1029 | */ | ||
1013 | int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) | 1030 | int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) |
1014 | { | 1031 | { |
1015 | unsigned long flags; | 1032 | unsigned long flags; |
@@ -1031,8 +1048,8 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) | |||
1031 | /* | 1048 | /* |
1032 | * __kill_pgrp_info() sends a signal to a process group: this is what the tty | 1049 | * __kill_pgrp_info() sends a signal to a process group: this is what the tty |
1033 | * control characters do (^C, ^Z etc) | 1050 | * control characters do (^C, ^Z etc) |
1051 | * - the caller must hold at least a readlock on tasklist_lock | ||
1034 | */ | 1052 | */ |
1035 | |||
1036 | int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp) | 1053 | int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp) |
1037 | { | 1054 | { |
1038 | struct task_struct *p = NULL; | 1055 | struct task_struct *p = NULL; |
@@ -1088,6 +1105,7 @@ int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid, | |||
1088 | { | 1105 | { |
1089 | int ret = -EINVAL; | 1106 | int ret = -EINVAL; |
1090 | struct task_struct *p; | 1107 | struct task_struct *p; |
1108 | const struct cred *pcred; | ||
1091 | 1109 | ||
1092 | if (!valid_signal(sig)) | 1110 | if (!valid_signal(sig)) |
1093 | return ret; | 1111 | return ret; |
@@ -1098,9 +1116,11 @@ int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid, | |||
1098 | ret = -ESRCH; | 1116 | ret = -ESRCH; |
1099 | goto out_unlock; | 1117 | goto out_unlock; |
1100 | } | 1118 | } |
1101 | if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info))) | 1119 | pcred = __task_cred(p); |
1102 | && (euid != p->suid) && (euid != p->uid) | 1120 | if ((info == SEND_SIG_NOINFO || |
1103 | && (uid != p->suid) && (uid != p->uid)) { | 1121 | (!is_si_special(info) && SI_FROMUSER(info))) && |
1122 | euid != pcred->suid && euid != pcred->uid && | ||
1123 | uid != pcred->suid && uid != pcred->uid) { | ||
1104 | ret = -EPERM; | 1124 | ret = -EPERM; |
1105 | goto out_unlock; | 1125 | goto out_unlock; |
1106 | } | 1126 | } |
@@ -1371,10 +1391,9 @@ int do_notify_parent(struct task_struct *tsk, int sig) | |||
1371 | */ | 1391 | */ |
1372 | rcu_read_lock(); | 1392 | rcu_read_lock(); |
1373 | info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns); | 1393 | info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns); |
1394 | info.si_uid = __task_cred(tsk)->uid; | ||
1374 | rcu_read_unlock(); | 1395 | rcu_read_unlock(); |
1375 | 1396 | ||
1376 | info.si_uid = tsk->uid; | ||
1377 | |||
1378 | thread_group_cputime(tsk, &cputime); | 1397 | thread_group_cputime(tsk, &cputime); |
1379 | info.si_utime = cputime_to_jiffies(cputime.utime); | 1398 | info.si_utime = cputime_to_jiffies(cputime.utime); |
1380 | info.si_stime = cputime_to_jiffies(cputime.stime); | 1399 | info.si_stime = cputime_to_jiffies(cputime.stime); |
@@ -1442,10 +1461,9 @@ static void do_notify_parent_cldstop(struct task_struct *tsk, int why) | |||
1442 | */ | 1461 | */ |
1443 | rcu_read_lock(); | 1462 | rcu_read_lock(); |
1444 | info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns); | 1463 | info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns); |
1464 | info.si_uid = __task_cred(tsk)->uid; | ||
1445 | rcu_read_unlock(); | 1465 | rcu_read_unlock(); |
1446 | 1466 | ||
1447 | info.si_uid = tsk->uid; | ||
1448 | |||
1449 | info.si_utime = cputime_to_clock_t(tsk->utime); | 1467 | info.si_utime = cputime_to_clock_t(tsk->utime); |
1450 | info.si_stime = cputime_to_clock_t(tsk->stime); | 1468 | info.si_stime = cputime_to_clock_t(tsk->stime); |
1451 | 1469 | ||
@@ -1600,7 +1618,7 @@ void ptrace_notify(int exit_code) | |||
1600 | info.si_signo = SIGTRAP; | 1618 | info.si_signo = SIGTRAP; |
1601 | info.si_code = exit_code; | 1619 | info.si_code = exit_code; |
1602 | info.si_pid = task_pid_vnr(current); | 1620 | info.si_pid = task_pid_vnr(current); |
1603 | info.si_uid = current->uid; | 1621 | info.si_uid = current_uid(); |
1604 | 1622 | ||
1605 | /* Let the debugger run. */ | 1623 | /* Let the debugger run. */ |
1606 | spin_lock_irq(¤t->sighand->siglock); | 1624 | spin_lock_irq(¤t->sighand->siglock); |
@@ -1712,7 +1730,7 @@ static int ptrace_signal(int signr, siginfo_t *info, | |||
1712 | info->si_errno = 0; | 1730 | info->si_errno = 0; |
1713 | info->si_code = SI_USER; | 1731 | info->si_code = SI_USER; |
1714 | info->si_pid = task_pid_vnr(current->parent); | 1732 | info->si_pid = task_pid_vnr(current->parent); |
1715 | info->si_uid = current->parent->uid; | 1733 | info->si_uid = task_uid(current->parent); |
1716 | } | 1734 | } |
1717 | 1735 | ||
1718 | /* If the (new) signal is now blocked, requeue it. */ | 1736 | /* If the (new) signal is now blocked, requeue it. */ |
@@ -2213,7 +2231,7 @@ sys_kill(pid_t pid, int sig) | |||
2213 | info.si_errno = 0; | 2231 | info.si_errno = 0; |
2214 | info.si_code = SI_USER; | 2232 | info.si_code = SI_USER; |
2215 | info.si_pid = task_tgid_vnr(current); | 2233 | info.si_pid = task_tgid_vnr(current); |
2216 | info.si_uid = current->uid; | 2234 | info.si_uid = current_uid(); |
2217 | 2235 | ||
2218 | return kill_something_info(sig, &info, pid); | 2236 | return kill_something_info(sig, &info, pid); |
2219 | } | 2237 | } |
@@ -2230,7 +2248,7 @@ static int do_tkill(pid_t tgid, pid_t pid, int sig) | |||
2230 | info.si_errno = 0; | 2248 | info.si_errno = 0; |
2231 | info.si_code = SI_TKILL; | 2249 | info.si_code = SI_TKILL; |
2232 | info.si_pid = task_tgid_vnr(current); | 2250 | info.si_pid = task_tgid_vnr(current); |
2233 | info.si_uid = current->uid; | 2251 | info.si_uid = current_uid(); |
2234 | 2252 | ||
2235 | rcu_read_lock(); | 2253 | rcu_read_lock(); |
2236 | p = find_task_by_vpid(pid); | 2254 | p = find_task_by_vpid(pid); |
diff --git a/kernel/sys.c b/kernel/sys.c index 31deba8f7d16..ebe65c2c9873 100644 --- a/kernel/sys.c +++ b/kernel/sys.c | |||
@@ -112,12 +112,17 @@ EXPORT_SYMBOL(cad_pid); | |||
112 | 112 | ||
113 | void (*pm_power_off_prepare)(void); | 113 | void (*pm_power_off_prepare)(void); |
114 | 114 | ||
115 | /* | ||
116 | * set the priority of a task | ||
117 | * - the caller must hold the RCU read lock | ||
118 | */ | ||
115 | static int set_one_prio(struct task_struct *p, int niceval, int error) | 119 | static int set_one_prio(struct task_struct *p, int niceval, int error) |
116 | { | 120 | { |
121 | const struct cred *cred = current_cred(), *pcred = __task_cred(p); | ||
117 | int no_nice; | 122 | int no_nice; |
118 | 123 | ||
119 | if (p->uid != current->euid && | 124 | if (pcred->uid != cred->euid && |
120 | p->euid != current->euid && !capable(CAP_SYS_NICE)) { | 125 | pcred->euid != cred->euid && !capable(CAP_SYS_NICE)) { |
121 | error = -EPERM; | 126 | error = -EPERM; |
122 | goto out; | 127 | goto out; |
123 | } | 128 | } |
@@ -141,6 +146,7 @@ asmlinkage long sys_setpriority(int which, int who, int niceval) | |||
141 | { | 146 | { |
142 | struct task_struct *g, *p; | 147 | struct task_struct *g, *p; |
143 | struct user_struct *user; | 148 | struct user_struct *user; |
149 | const struct cred *cred = current_cred(); | ||
144 | int error = -EINVAL; | 150 | int error = -EINVAL; |
145 | struct pid *pgrp; | 151 | struct pid *pgrp; |
146 | 152 | ||
@@ -174,18 +180,18 @@ asmlinkage long sys_setpriority(int which, int who, int niceval) | |||
174 | } while_each_pid_thread(pgrp, PIDTYPE_PGID, p); | 180 | } while_each_pid_thread(pgrp, PIDTYPE_PGID, p); |
175 | break; | 181 | break; |
176 | case PRIO_USER: | 182 | case PRIO_USER: |
177 | user = current->user; | 183 | user = (struct user_struct *) cred->user; |
178 | if (!who) | 184 | if (!who) |
179 | who = current->uid; | 185 | who = cred->uid; |
180 | else | 186 | else if ((who != cred->uid) && |
181 | if ((who != current->uid) && !(user = find_user(who))) | 187 | !(user = find_user(who))) |
182 | goto out_unlock; /* No processes for this user */ | 188 | goto out_unlock; /* No processes for this user */ |
183 | 189 | ||
184 | do_each_thread(g, p) | 190 | do_each_thread(g, p) |
185 | if (p->uid == who) | 191 | if (__task_cred(p)->uid == who) |
186 | error = set_one_prio(p, niceval, error); | 192 | error = set_one_prio(p, niceval, error); |
187 | while_each_thread(g, p); | 193 | while_each_thread(g, p); |
188 | if (who != current->uid) | 194 | if (who != cred->uid) |
189 | free_uid(user); /* For find_user() */ | 195 | free_uid(user); /* For find_user() */ |
190 | break; | 196 | break; |
191 | } | 197 | } |
@@ -205,6 +211,7 @@ asmlinkage long sys_getpriority(int which, int who) | |||
205 | { | 211 | { |
206 | struct task_struct *g, *p; | 212 | struct task_struct *g, *p; |
207 | struct user_struct *user; | 213 | struct user_struct *user; |
214 | const struct cred *cred = current_cred(); | ||
208 | long niceval, retval = -ESRCH; | 215 | long niceval, retval = -ESRCH; |
209 | struct pid *pgrp; | 216 | struct pid *pgrp; |
210 | 217 | ||
@@ -236,21 +243,21 @@ asmlinkage long sys_getpriority(int which, int who) | |||
236 | } while_each_pid_thread(pgrp, PIDTYPE_PGID, p); | 243 | } while_each_pid_thread(pgrp, PIDTYPE_PGID, p); |
237 | break; | 244 | break; |
238 | case PRIO_USER: | 245 | case PRIO_USER: |
239 | user = current->user; | 246 | user = (struct user_struct *) cred->user; |
240 | if (!who) | 247 | if (!who) |
241 | who = current->uid; | 248 | who = cred->uid; |
242 | else | 249 | else if ((who != cred->uid) && |
243 | if ((who != current->uid) && !(user = find_user(who))) | 250 | !(user = find_user(who))) |
244 | goto out_unlock; /* No processes for this user */ | 251 | goto out_unlock; /* No processes for this user */ |
245 | 252 | ||
246 | do_each_thread(g, p) | 253 | do_each_thread(g, p) |
247 | if (p->uid == who) { | 254 | if (__task_cred(p)->uid == who) { |
248 | niceval = 20 - task_nice(p); | 255 | niceval = 20 - task_nice(p); |
249 | if (niceval > retval) | 256 | if (niceval > retval) |
250 | retval = niceval; | 257 | retval = niceval; |
251 | } | 258 | } |
252 | while_each_thread(g, p); | 259 | while_each_thread(g, p); |
253 | if (who != current->uid) | 260 | if (who != cred->uid) |
254 | free_uid(user); /* for find_user() */ | 261 | free_uid(user); /* for find_user() */ |
255 | break; | 262 | break; |
256 | } | 263 | } |
@@ -472,46 +479,48 @@ void ctrl_alt_del(void) | |||
472 | */ | 479 | */ |
473 | asmlinkage long sys_setregid(gid_t rgid, gid_t egid) | 480 | asmlinkage long sys_setregid(gid_t rgid, gid_t egid) |
474 | { | 481 | { |
475 | int old_rgid = current->gid; | 482 | const struct cred *old; |
476 | int old_egid = current->egid; | 483 | struct cred *new; |
477 | int new_rgid = old_rgid; | ||
478 | int new_egid = old_egid; | ||
479 | int retval; | 484 | int retval; |
480 | 485 | ||
486 | new = prepare_creds(); | ||
487 | if (!new) | ||
488 | return -ENOMEM; | ||
489 | old = current_cred(); | ||
490 | |||
481 | retval = security_task_setgid(rgid, egid, (gid_t)-1, LSM_SETID_RE); | 491 | retval = security_task_setgid(rgid, egid, (gid_t)-1, LSM_SETID_RE); |
482 | if (retval) | 492 | if (retval) |
483 | return retval; | 493 | goto error; |
484 | 494 | ||
495 | retval = -EPERM; | ||
485 | if (rgid != (gid_t) -1) { | 496 | if (rgid != (gid_t) -1) { |
486 | if ((old_rgid == rgid) || | 497 | if (old->gid == rgid || |
487 | (current->egid==rgid) || | 498 | old->egid == rgid || |
488 | capable(CAP_SETGID)) | 499 | capable(CAP_SETGID)) |
489 | new_rgid = rgid; | 500 | new->gid = rgid; |
490 | else | 501 | else |
491 | return -EPERM; | 502 | goto error; |
492 | } | 503 | } |
493 | if (egid != (gid_t) -1) { | 504 | if (egid != (gid_t) -1) { |
494 | if ((old_rgid == egid) || | 505 | if (old->gid == egid || |
495 | (current->egid == egid) || | 506 | old->egid == egid || |
496 | (current->sgid == egid) || | 507 | old->sgid == egid || |
497 | capable(CAP_SETGID)) | 508 | capable(CAP_SETGID)) |
498 | new_egid = egid; | 509 | new->egid = egid; |
499 | else | 510 | else |
500 | return -EPERM; | 511 | goto error; |
501 | } | ||
502 | if (new_egid != old_egid) { | ||
503 | set_dumpable(current->mm, suid_dumpable); | ||
504 | smp_wmb(); | ||
505 | } | 512 | } |
513 | |||
506 | if (rgid != (gid_t) -1 || | 514 | if (rgid != (gid_t) -1 || |
507 | (egid != (gid_t) -1 && egid != old_rgid)) | 515 | (egid != (gid_t) -1 && egid != old->gid)) |
508 | current->sgid = new_egid; | 516 | new->sgid = new->egid; |
509 | current->fsgid = new_egid; | 517 | new->fsgid = new->egid; |
510 | current->egid = new_egid; | 518 | |
511 | current->gid = new_rgid; | 519 | return commit_creds(new); |
512 | key_fsgid_changed(current); | 520 | |
513 | proc_id_connector(current, PROC_EVENT_GID); | 521 | error: |
514 | return 0; | 522 | abort_creds(new); |
523 | return retval; | ||
515 | } | 524 | } |
516 | 525 | ||
517 | /* | 526 | /* |
@@ -521,56 +530,54 @@ asmlinkage long sys_setregid(gid_t rgid, gid_t egid) | |||
521 | */ | 530 | */ |
522 | asmlinkage long sys_setgid(gid_t gid) | 531 | asmlinkage long sys_setgid(gid_t gid) |
523 | { | 532 | { |
524 | int old_egid = current->egid; | 533 | const struct cred *old; |
534 | struct cred *new; | ||
525 | int retval; | 535 | int retval; |
526 | 536 | ||
537 | new = prepare_creds(); | ||
538 | if (!new) | ||
539 | return -ENOMEM; | ||
540 | old = current_cred(); | ||
541 | |||
527 | retval = security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_ID); | 542 | retval = security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_ID); |
528 | if (retval) | 543 | if (retval) |
529 | return retval; | 544 | goto error; |
530 | 545 | ||
531 | if (capable(CAP_SETGID)) { | 546 | retval = -EPERM; |
532 | if (old_egid != gid) { | 547 | if (capable(CAP_SETGID)) |
533 | set_dumpable(current->mm, suid_dumpable); | 548 | new->gid = new->egid = new->sgid = new->fsgid = gid; |
534 | smp_wmb(); | 549 | else if (gid == old->gid || gid == old->sgid) |
535 | } | 550 | new->egid = new->fsgid = gid; |
536 | current->gid = current->egid = current->sgid = current->fsgid = gid; | ||
537 | } else if ((gid == current->gid) || (gid == current->sgid)) { | ||
538 | if (old_egid != gid) { | ||
539 | set_dumpable(current->mm, suid_dumpable); | ||
540 | smp_wmb(); | ||
541 | } | ||
542 | current->egid = current->fsgid = gid; | ||
543 | } | ||
544 | else | 551 | else |
545 | return -EPERM; | 552 | goto error; |
546 | 553 | ||
547 | key_fsgid_changed(current); | 554 | return commit_creds(new); |
548 | proc_id_connector(current, PROC_EVENT_GID); | 555 | |
549 | return 0; | 556 | error: |
557 | abort_creds(new); | ||
558 | return retval; | ||
550 | } | 559 | } |
551 | 560 | ||
552 | static int set_user(uid_t new_ruid, int dumpclear) | 561 | /* |
562 | * change the user struct in a credentials set to match the new UID | ||
563 | */ | ||
564 | static int set_user(struct cred *new) | ||
553 | { | 565 | { |
554 | struct user_struct *new_user; | 566 | struct user_struct *new_user; |
555 | 567 | ||
556 | new_user = alloc_uid(current->nsproxy->user_ns, new_ruid); | 568 | new_user = alloc_uid(current_user_ns(), new->uid); |
557 | if (!new_user) | 569 | if (!new_user) |
558 | return -EAGAIN; | 570 | return -EAGAIN; |
559 | 571 | ||
560 | if (atomic_read(&new_user->processes) >= | 572 | if (atomic_read(&new_user->processes) >= |
561 | current->signal->rlim[RLIMIT_NPROC].rlim_cur && | 573 | current->signal->rlim[RLIMIT_NPROC].rlim_cur && |
562 | new_user != current->nsproxy->user_ns->root_user) { | 574 | new_user != INIT_USER) { |
563 | free_uid(new_user); | 575 | free_uid(new_user); |
564 | return -EAGAIN; | 576 | return -EAGAIN; |
565 | } | 577 | } |
566 | 578 | ||
567 | switch_uid(new_user); | 579 | free_uid(new->user); |
568 | 580 | new->user = new_user; | |
569 | if (dumpclear) { | ||
570 | set_dumpable(current->mm, suid_dumpable); | ||
571 | smp_wmb(); | ||
572 | } | ||
573 | current->uid = new_ruid; | ||
574 | return 0; | 581 | return 0; |
575 | } | 582 | } |
576 | 583 | ||
@@ -591,54 +598,56 @@ static int set_user(uid_t new_ruid, int dumpclear) | |||
591 | */ | 598 | */ |
592 | asmlinkage long sys_setreuid(uid_t ruid, uid_t euid) | 599 | asmlinkage long sys_setreuid(uid_t ruid, uid_t euid) |
593 | { | 600 | { |
594 | int old_ruid, old_euid, old_suid, new_ruid, new_euid; | 601 | const struct cred *old; |
602 | struct cred *new; | ||
595 | int retval; | 603 | int retval; |
596 | 604 | ||
605 | new = prepare_creds(); | ||
606 | if (!new) | ||
607 | return -ENOMEM; | ||
608 | old = current_cred(); | ||
609 | |||
597 | retval = security_task_setuid(ruid, euid, (uid_t)-1, LSM_SETID_RE); | 610 | retval = security_task_setuid(ruid, euid, (uid_t)-1, LSM_SETID_RE); |
598 | if (retval) | 611 | if (retval) |
599 | return retval; | 612 | goto error; |
600 | |||
601 | new_ruid = old_ruid = current->uid; | ||
602 | new_euid = old_euid = current->euid; | ||
603 | old_suid = current->suid; | ||
604 | 613 | ||
614 | retval = -EPERM; | ||
605 | if (ruid != (uid_t) -1) { | 615 | if (ruid != (uid_t) -1) { |
606 | new_ruid = ruid; | 616 | new->uid = ruid; |
607 | if ((old_ruid != ruid) && | 617 | if (old->uid != ruid && |
608 | (current->euid != ruid) && | 618 | old->euid != ruid && |
609 | !capable(CAP_SETUID)) | 619 | !capable(CAP_SETUID)) |
610 | return -EPERM; | 620 | goto error; |
611 | } | 621 | } |
612 | 622 | ||
613 | if (euid != (uid_t) -1) { | 623 | if (euid != (uid_t) -1) { |
614 | new_euid = euid; | 624 | new->euid = euid; |
615 | if ((old_ruid != euid) && | 625 | if (old->uid != euid && |
616 | (current->euid != euid) && | 626 | old->euid != euid && |
617 | (current->suid != euid) && | 627 | old->suid != euid && |
618 | !capable(CAP_SETUID)) | 628 | !capable(CAP_SETUID)) |
619 | return -EPERM; | 629 | goto error; |
620 | } | 630 | } |
621 | 631 | ||
622 | if (new_ruid != old_ruid && set_user(new_ruid, new_euid != old_euid) < 0) | 632 | retval = -EAGAIN; |
623 | return -EAGAIN; | 633 | if (new->uid != old->uid && set_user(new) < 0) |
634 | goto error; | ||
624 | 635 | ||
625 | if (new_euid != old_euid) { | ||
626 | set_dumpable(current->mm, suid_dumpable); | ||
627 | smp_wmb(); | ||
628 | } | ||
629 | current->fsuid = current->euid = new_euid; | ||
630 | if (ruid != (uid_t) -1 || | 636 | if (ruid != (uid_t) -1 || |
631 | (euid != (uid_t) -1 && euid != old_ruid)) | 637 | (euid != (uid_t) -1 && euid != old->uid)) |
632 | current->suid = current->euid; | 638 | new->suid = new->euid; |
633 | current->fsuid = current->euid; | 639 | new->fsuid = new->euid; |
634 | 640 | ||
635 | key_fsuid_changed(current); | 641 | retval = security_task_fix_setuid(new, old, LSM_SETID_RE); |
636 | proc_id_connector(current, PROC_EVENT_UID); | 642 | if (retval < 0) |
637 | 643 | goto error; | |
638 | return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RE); | ||
639 | } | ||
640 | 644 | ||
645 | return commit_creds(new); | ||
641 | 646 | ||
647 | error: | ||
648 | abort_creds(new); | ||
649 | return retval; | ||
650 | } | ||
642 | 651 | ||
643 | /* | 652 | /* |
644 | * setuid() is implemented like SysV with SAVED_IDS | 653 | * setuid() is implemented like SysV with SAVED_IDS |
@@ -653,36 +662,41 @@ asmlinkage long sys_setreuid(uid_t ruid, uid_t euid) | |||
653 | */ | 662 | */ |
654 | asmlinkage long sys_setuid(uid_t uid) | 663 | asmlinkage long sys_setuid(uid_t uid) |
655 | { | 664 | { |
656 | int old_euid = current->euid; | 665 | const struct cred *old; |
657 | int old_ruid, old_suid, new_suid; | 666 | struct cred *new; |
658 | int retval; | 667 | int retval; |
659 | 668 | ||
669 | new = prepare_creds(); | ||
670 | if (!new) | ||
671 | return -ENOMEM; | ||
672 | old = current_cred(); | ||
673 | |||
660 | retval = security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_ID); | 674 | retval = security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_ID); |
661 | if (retval) | 675 | if (retval) |
662 | return retval; | 676 | goto error; |
663 | 677 | ||
664 | old_ruid = current->uid; | 678 | retval = -EPERM; |
665 | old_suid = current->suid; | ||
666 | new_suid = old_suid; | ||
667 | |||
668 | if (capable(CAP_SETUID)) { | 679 | if (capable(CAP_SETUID)) { |
669 | if (uid != old_ruid && set_user(uid, old_euid != uid) < 0) | 680 | new->suid = new->uid = uid; |
670 | return -EAGAIN; | 681 | if (uid != old->uid && set_user(new) < 0) { |
671 | new_suid = uid; | 682 | retval = -EAGAIN; |
672 | } else if ((uid != current->uid) && (uid != new_suid)) | 683 | goto error; |
673 | return -EPERM; | 684 | } |
674 | 685 | } else if (uid != old->uid && uid != new->suid) { | |
675 | if (old_euid != uid) { | 686 | goto error; |
676 | set_dumpable(current->mm, suid_dumpable); | ||
677 | smp_wmb(); | ||
678 | } | 687 | } |
679 | current->fsuid = current->euid = uid; | ||
680 | current->suid = new_suid; | ||
681 | 688 | ||
682 | key_fsuid_changed(current); | 689 | new->fsuid = new->euid = uid; |
683 | proc_id_connector(current, PROC_EVENT_UID); | 690 | |
691 | retval = security_task_fix_setuid(new, old, LSM_SETID_ID); | ||
692 | if (retval < 0) | ||
693 | goto error; | ||
684 | 694 | ||
685 | return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_ID); | 695 | return commit_creds(new); |
696 | |||
697 | error: | ||
698 | abort_creds(new); | ||
699 | return retval; | ||
686 | } | 700 | } |
687 | 701 | ||
688 | 702 | ||
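The setuid-family calls above and below all follow the same copy-modify-commit shape rather than writing to current->uid and friends directly. A minimal sketch of that shape, assuming only the cred API introduced by this series (prepare_creds(), commit_creds(), abort_creds()); the helper name and the fsuid-only change are hypothetical:

	static int example_set_fsuid_only(uid_t uid)
	{
		struct cred *new;

		new = prepare_creds();		/* private copy of current's credentials */
		if (!new)
			return -ENOMEM;

		new->fsuid = uid;		/* mutate only the unpublished copy */

		/* any failure path before commit must do: abort_creds(new); */
		return commit_creds(new);	/* publish and consume the reference */
	}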
@@ -692,54 +706,63 @@ asmlinkage long sys_setuid(uid_t uid) | |||
692 | */ | 706 | */ |
693 | asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid) | 707 | asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid) |
694 | { | 708 | { |
695 | int old_ruid = current->uid; | 709 | const struct cred *old; |
696 | int old_euid = current->euid; | 710 | struct cred *new; |
697 | int old_suid = current->suid; | ||
698 | int retval; | 711 | int retval; |
699 | 712 | ||
713 | new = prepare_creds(); | ||
714 | if (!new) | ||
715 | return -ENOMEM; | ||
716 | |||
700 | retval = security_task_setuid(ruid, euid, suid, LSM_SETID_RES); | 717 | retval = security_task_setuid(ruid, euid, suid, LSM_SETID_RES); |
701 | if (retval) | 718 | if (retval) |
702 | return retval; | 719 | goto error; |
720 | old = current_cred(); | ||
703 | 721 | ||
722 | retval = -EPERM; | ||
704 | if (!capable(CAP_SETUID)) { | 723 | if (!capable(CAP_SETUID)) { |
705 | if ((ruid != (uid_t) -1) && (ruid != current->uid) && | 724 | if (ruid != (uid_t) -1 && ruid != old->uid && |
706 | (ruid != current->euid) && (ruid != current->suid)) | 725 | ruid != old->euid && ruid != old->suid) |
707 | return -EPERM; | 726 | goto error; |
708 | if ((euid != (uid_t) -1) && (euid != current->uid) && | 727 | if (euid != (uid_t) -1 && euid != old->uid && |
709 | (euid != current->euid) && (euid != current->suid)) | 728 | euid != old->euid && euid != old->suid) |
710 | return -EPERM; | 729 | goto error; |
711 | if ((suid != (uid_t) -1) && (suid != current->uid) && | 730 | if (suid != (uid_t) -1 && suid != old->uid && |
712 | (suid != current->euid) && (suid != current->suid)) | 731 | suid != old->euid && suid != old->suid) |
713 | return -EPERM; | 732 | goto error; |
714 | } | 733 | } |
734 | |||
735 | retval = -EAGAIN; | ||
715 | if (ruid != (uid_t) -1) { | 736 | if (ruid != (uid_t) -1) { |
716 | if (ruid != current->uid && set_user(ruid, euid != current->euid) < 0) | 737 | new->uid = ruid; |
717 | return -EAGAIN; | 738 | if (ruid != old->uid && set_user(new) < 0) |
739 | goto error; | ||
718 | } | 740 | } |
719 | if (euid != (uid_t) -1) { | 741 | if (euid != (uid_t) -1) |
720 | if (euid != current->euid) { | 742 | new->euid = euid; |
721 | set_dumpable(current->mm, suid_dumpable); | ||
722 | smp_wmb(); | ||
723 | } | ||
724 | current->euid = euid; | ||
725 | } | ||
726 | current->fsuid = current->euid; | ||
727 | if (suid != (uid_t) -1) | 743 | if (suid != (uid_t) -1) |
728 | current->suid = suid; | 744 | new->suid = suid; |
745 | new->fsuid = new->euid; | ||
746 | |||
747 | retval = security_task_fix_setuid(new, old, LSM_SETID_RES); | ||
748 | if (retval < 0) | ||
749 | goto error; | ||
729 | 750 | ||
730 | key_fsuid_changed(current); | 751 | return commit_creds(new); |
731 | proc_id_connector(current, PROC_EVENT_UID); | ||
732 | 752 | ||
733 | return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RES); | 753 | error: |
754 | abort_creds(new); | ||
755 | return retval; | ||
734 | } | 756 | } |
735 | 757 | ||
736 | asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __user *suid) | 758 | asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __user *suid) |
737 | { | 759 | { |
760 | const struct cred *cred = current_cred(); | ||
738 | int retval; | 761 | int retval; |
739 | 762 | ||
740 | if (!(retval = put_user(current->uid, ruid)) && | 763 | if (!(retval = put_user(cred->uid, ruid)) && |
741 | !(retval = put_user(current->euid, euid))) | 764 | !(retval = put_user(cred->euid, euid))) |
742 | retval = put_user(current->suid, suid); | 765 | retval = put_user(cred->suid, suid); |
743 | 766 | ||
744 | return retval; | 767 | return retval; |
745 | } | 768 | } |
@@ -749,48 +772,55 @@ asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __us | |||
749 | */ | 772 | */ |
750 | asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid) | 773 | asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid) |
751 | { | 774 | { |
775 | const struct cred *old; | ||
776 | struct cred *new; | ||
752 | int retval; | 777 | int retval; |
753 | 778 | ||
779 | new = prepare_creds(); | ||
780 | if (!new) | ||
781 | return -ENOMEM; | ||
782 | old = current_cred(); | ||
783 | |||
754 | retval = security_task_setgid(rgid, egid, sgid, LSM_SETID_RES); | 784 | retval = security_task_setgid(rgid, egid, sgid, LSM_SETID_RES); |
755 | if (retval) | 785 | if (retval) |
756 | return retval; | 786 | goto error; |
757 | 787 | ||
788 | retval = -EPERM; | ||
758 | if (!capable(CAP_SETGID)) { | 789 | if (!capable(CAP_SETGID)) { |
759 | if ((rgid != (gid_t) -1) && (rgid != current->gid) && | 790 | if (rgid != (gid_t) -1 && rgid != old->gid && |
760 | (rgid != current->egid) && (rgid != current->sgid)) | 791 | rgid != old->egid && rgid != old->sgid) |
761 | return -EPERM; | 792 | goto error; |
762 | if ((egid != (gid_t) -1) && (egid != current->gid) && | 793 | if (egid != (gid_t) -1 && egid != old->gid && |
763 | (egid != current->egid) && (egid != current->sgid)) | 794 | egid != old->egid && egid != old->sgid) |
764 | return -EPERM; | 795 | goto error; |
765 | if ((sgid != (gid_t) -1) && (sgid != current->gid) && | 796 | if (sgid != (gid_t) -1 && sgid != old->gid && |
766 | (sgid != current->egid) && (sgid != current->sgid)) | 797 | sgid != old->egid && sgid != old->sgid) |
767 | return -EPERM; | 798 | goto error; |
768 | } | 799 | } |
769 | if (egid != (gid_t) -1) { | 800 | |
770 | if (egid != current->egid) { | ||
771 | set_dumpable(current->mm, suid_dumpable); | ||
772 | smp_wmb(); | ||
773 | } | ||
774 | current->egid = egid; | ||
775 | } | ||
776 | current->fsgid = current->egid; | ||
777 | if (rgid != (gid_t) -1) | 801 | if (rgid != (gid_t) -1) |
778 | current->gid = rgid; | 802 | new->gid = rgid; |
803 | if (egid != (gid_t) -1) | ||
804 | new->egid = egid; | ||
779 | if (sgid != (gid_t) -1) | 805 | if (sgid != (gid_t) -1) |
780 | current->sgid = sgid; | 806 | new->sgid = sgid; |
807 | new->fsgid = new->egid; | ||
781 | 808 | ||
782 | key_fsgid_changed(current); | 809 | return commit_creds(new); |
783 | proc_id_connector(current, PROC_EVENT_GID); | 810 | |
784 | return 0; | 811 | error: |
812 | abort_creds(new); | ||
813 | return retval; | ||
785 | } | 814 | } |
786 | 815 | ||
787 | asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __user *sgid) | 816 | asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __user *sgid) |
788 | { | 817 | { |
818 | const struct cred *cred = current_cred(); | ||
789 | int retval; | 819 | int retval; |
790 | 820 | ||
791 | if (!(retval = put_user(current->gid, rgid)) && | 821 | if (!(retval = put_user(cred->gid, rgid)) && |
792 | !(retval = put_user(current->egid, egid))) | 822 | !(retval = put_user(cred->egid, egid))) |
793 | retval = put_user(current->sgid, sgid); | 823 | retval = put_user(cred->sgid, sgid); |
794 | 824 | ||
795 | return retval; | 825 | return retval; |
796 | } | 826 | } |
@@ -804,27 +834,35 @@ asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __us | |||
804 | */ | 834 | */ |
805 | asmlinkage long sys_setfsuid(uid_t uid) | 835 | asmlinkage long sys_setfsuid(uid_t uid) |
806 | { | 836 | { |
807 | int old_fsuid; | 837 | const struct cred *old; |
838 | struct cred *new; | ||
839 | uid_t old_fsuid; | ||
808 | 840 | ||
809 | old_fsuid = current->fsuid; | 841 | new = prepare_creds(); |
810 | if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS)) | 842 | if (!new) |
811 | return old_fsuid; | 843 | return current_fsuid(); |
844 | old = current_cred(); | ||
845 | old_fsuid = old->fsuid; | ||
812 | 846 | ||
813 | if (uid == current->uid || uid == current->euid || | 847 | if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS) < 0) |
814 | uid == current->suid || uid == current->fsuid || | 848 | goto error; |
849 | |||
850 | if (uid == old->uid || uid == old->euid || | ||
851 | uid == old->suid || uid == old->fsuid || | ||
815 | capable(CAP_SETUID)) { | 852 | capable(CAP_SETUID)) { |
816 | if (uid != old_fsuid) { | 853 | if (uid != old_fsuid) { |
817 | set_dumpable(current->mm, suid_dumpable); | 854 | new->fsuid = uid; |
818 | smp_wmb(); | 855 | if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0) |
856 | goto change_okay; | ||
819 | } | 857 | } |
820 | current->fsuid = uid; | ||
821 | } | 858 | } |
822 | 859 | ||
823 | key_fsuid_changed(current); | 860 | error: |
824 | proc_id_connector(current, PROC_EVENT_UID); | 861 | abort_creds(new); |
825 | 862 | return old_fsuid; | |
826 | security_task_post_setuid(old_fsuid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS); | ||
827 | 863 | ||
864 | change_okay: | ||
865 | commit_creds(new); | ||
828 | return old_fsuid; | 866 | return old_fsuid; |
829 | } | 867 | } |
830 | 868 | ||
@@ -833,23 +871,34 @@ asmlinkage long sys_setfsuid(uid_t uid) | |||
833 | */ | 871 | */ |
834 | asmlinkage long sys_setfsgid(gid_t gid) | 872 | asmlinkage long sys_setfsgid(gid_t gid) |
835 | { | 873 | { |
836 | int old_fsgid; | 874 | const struct cred *old; |
875 | struct cred *new; | ||
876 | gid_t old_fsgid; | ||
877 | |||
878 | new = prepare_creds(); | ||
879 | if (!new) | ||
880 | return current_fsgid(); | ||
881 | old = current_cred(); | ||
882 | old_fsgid = old->fsgid; | ||
837 | 883 | ||
838 | old_fsgid = current->fsgid; | ||
839 | if (security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_FS)) | 884 | if (security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_FS)) |
840 | return old_fsgid; | 885 | goto error; |
841 | 886 | ||
842 | if (gid == current->gid || gid == current->egid || | 887 | if (gid == old->gid || gid == old->egid || |
843 | gid == current->sgid || gid == current->fsgid || | 888 | gid == old->sgid || gid == old->fsgid || |
844 | capable(CAP_SETGID)) { | 889 | capable(CAP_SETGID)) { |
845 | if (gid != old_fsgid) { | 890 | if (gid != old_fsgid) { |
846 | set_dumpable(current->mm, suid_dumpable); | 891 | new->fsgid = gid; |
847 | smp_wmb(); | 892 | goto change_okay; |
848 | } | 893 | } |
849 | current->fsgid = gid; | ||
850 | key_fsgid_changed(current); | ||
851 | proc_id_connector(current, PROC_EVENT_GID); | ||
852 | } | 894 | } |
895 | |||
896 | error: | ||
897 | abort_creds(new); | ||
898 | return old_fsgid; | ||
899 | |||
900 | change_okay: | ||
901 | commit_creds(new); | ||
853 | return old_fsgid; | 902 | return old_fsgid; |
854 | } | 903 | } |
855 | 904 | ||
@@ -1118,7 +1167,7 @@ EXPORT_SYMBOL(groups_free); | |||
1118 | 1167 | ||
1119 | /* export the group_info to a user-space array */ | 1168 | /* export the group_info to a user-space array */ |
1120 | static int groups_to_user(gid_t __user *grouplist, | 1169 | static int groups_to_user(gid_t __user *grouplist, |
1121 | struct group_info *group_info) | 1170 | const struct group_info *group_info) |
1122 | { | 1171 | { |
1123 | int i; | 1172 | int i; |
1124 | unsigned int count = group_info->ngroups; | 1173 | unsigned int count = group_info->ngroups; |
@@ -1186,7 +1235,7 @@ static void groups_sort(struct group_info *group_info) | |||
1186 | } | 1235 | } |
1187 | 1236 | ||
1188 | /* a simple bsearch */ | 1237 | /* a simple bsearch */ |
1189 | int groups_search(struct group_info *group_info, gid_t grp) | 1238 | int groups_search(const struct group_info *group_info, gid_t grp) |
1190 | { | 1239 | { |
1191 | unsigned int left, right; | 1240 | unsigned int left, right; |
1192 | 1241 | ||
@@ -1208,51 +1257,74 @@ int groups_search(struct group_info *group_info, gid_t grp) | |||
1208 | return 0; | 1257 | return 0; |
1209 | } | 1258 | } |
1210 | 1259 | ||
1211 | /* validate and set current->group_info */ | 1260 | /** |
1212 | int set_current_groups(struct group_info *group_info) | 1261 | * set_groups - Change a group subscription in a set of credentials |
1262 | * @new: The newly prepared set of credentials to alter | ||
1263 | * @group_info: The group list to install | ||
1264 | * | ||
1265 | * Validate a group subscription and, if valid, insert it into a set | ||
1266 | * of credentials. | ||
1267 | */ | ||
1268 | int set_groups(struct cred *new, struct group_info *group_info) | ||
1213 | { | 1269 | { |
1214 | int retval; | 1270 | int retval; |
1215 | struct group_info *old_info; | ||
1216 | 1271 | ||
1217 | retval = security_task_setgroups(group_info); | 1272 | retval = security_task_setgroups(group_info); |
1218 | if (retval) | 1273 | if (retval) |
1219 | return retval; | 1274 | return retval; |
1220 | 1275 | ||
1276 | put_group_info(new->group_info); | ||
1221 | groups_sort(group_info); | 1277 | groups_sort(group_info); |
1222 | get_group_info(group_info); | 1278 | get_group_info(group_info); |
1279 | new->group_info = group_info; | ||
1280 | return 0; | ||
1281 | } | ||
1282 | |||
1283 | EXPORT_SYMBOL(set_groups); | ||
1223 | 1284 | ||
1224 | task_lock(current); | 1285 | /** |
1225 | old_info = current->group_info; | 1286 | * set_current_groups - Change current's group subscription |
1226 | current->group_info = group_info; | 1287 | * @group_info: The group list to impose |
1227 | task_unlock(current); | 1288 | * |
1289 | * Validate a group subscription and, if valid, impose it upon current's task | ||
1290 | * security record. | ||
1291 | */ | ||
1292 | int set_current_groups(struct group_info *group_info) | ||
1293 | { | ||
1294 | struct cred *new; | ||
1295 | int ret; | ||
1228 | 1296 | ||
1229 | put_group_info(old_info); | 1297 | new = prepare_creds(); |
1298 | if (!new) | ||
1299 | return -ENOMEM; | ||
1230 | 1300 | ||
1231 | return 0; | 1301 | ret = set_groups(new, group_info); |
1302 | if (ret < 0) { | ||
1303 | abort_creds(new); | ||
1304 | return ret; | ||
1305 | } | ||
1306 | |||
1307 | return commit_creds(new); | ||
1232 | } | 1308 | } |
1233 | 1309 | ||
1234 | EXPORT_SYMBOL(set_current_groups); | 1310 | EXPORT_SYMBOL(set_current_groups); |
1235 | 1311 | ||
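For in-kernel callers that need to assume a different supplementary group list (nfsd-style identity switching, for instance), set_groups() slots into the same prepare/commit pattern. A sketch under that assumption; the gid value and the surrounding calling context are hypothetical:

	struct group_info *gi;
	struct cred *new;
	int ret;

	gi = groups_alloc(1);			/* room for one supplementary gid */
	if (!gi)
		return -ENOMEM;
	GROUP_AT(gi, 0) = 42;			/* hypothetical gid */

	new = prepare_creds();
	if (!new) {
		put_group_info(gi);
		return -ENOMEM;
	}

	ret = set_groups(new, gi);		/* validates, sorts, takes its own reference */
	put_group_info(gi);			/* drop the allocation reference */
	if (ret < 0) {
		abort_creds(new);
		return ret;
	}
	return commit_creds(new);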
1236 | asmlinkage long sys_getgroups(int gidsetsize, gid_t __user *grouplist) | 1312 | asmlinkage long sys_getgroups(int gidsetsize, gid_t __user *grouplist) |
1237 | { | 1313 | { |
1238 | int i = 0; | 1314 | const struct cred *cred = current_cred(); |
1239 | 1315 | int i; | |
1240 | /* | ||
1241 | * SMP: Nobody else can change our grouplist. Thus we are | ||
1242 | * safe. | ||
1243 | */ | ||
1244 | 1316 | ||
1245 | if (gidsetsize < 0) | 1317 | if (gidsetsize < 0) |
1246 | return -EINVAL; | 1318 | return -EINVAL; |
1247 | 1319 | ||
1248 | /* no need to grab task_lock here; it cannot change */ | 1320 | /* no need to grab task_lock here; it cannot change */ |
1249 | i = current->group_info->ngroups; | 1321 | i = cred->group_info->ngroups; |
1250 | if (gidsetsize) { | 1322 | if (gidsetsize) { |
1251 | if (i > gidsetsize) { | 1323 | if (i > gidsetsize) { |
1252 | i = -EINVAL; | 1324 | i = -EINVAL; |
1253 | goto out; | 1325 | goto out; |
1254 | } | 1326 | } |
1255 | if (groups_to_user(grouplist, current->group_info)) { | 1327 | if (groups_to_user(grouplist, cred->group_info)) { |
1256 | i = -EFAULT; | 1328 | i = -EFAULT; |
1257 | goto out; | 1329 | goto out; |
1258 | } | 1330 | } |
@@ -1296,9 +1368,11 @@ asmlinkage long sys_setgroups(int gidsetsize, gid_t __user *grouplist) | |||
1296 | */ | 1368 | */ |
1297 | int in_group_p(gid_t grp) | 1369 | int in_group_p(gid_t grp) |
1298 | { | 1370 | { |
1371 | const struct cred *cred = current_cred(); | ||
1299 | int retval = 1; | 1372 | int retval = 1; |
1300 | if (grp != current->fsgid) | 1373 | |
1301 | retval = groups_search(current->group_info, grp); | 1374 | if (grp != cred->fsgid) |
1375 | retval = groups_search(cred->group_info, grp); | ||
1302 | return retval; | 1376 | return retval; |
1303 | } | 1377 | } |
1304 | 1378 | ||
@@ -1306,9 +1380,11 @@ EXPORT_SYMBOL(in_group_p); | |||
1306 | 1380 | ||
1307 | int in_egroup_p(gid_t grp) | 1381 | int in_egroup_p(gid_t grp) |
1308 | { | 1382 | { |
1383 | const struct cred *cred = current_cred(); | ||
1309 | int retval = 1; | 1384 | int retval = 1; |
1310 | if (grp != current->egid) | 1385 | |
1311 | retval = groups_search(current->group_info, grp); | 1386 | if (grp != cred->egid) |
1387 | retval = groups_search(cred->group_info, grp); | ||
1312 | return retval; | 1388 | return retval; |
1313 | } | 1389 | } |
1314 | 1390 | ||
@@ -1624,50 +1700,56 @@ asmlinkage long sys_umask(int mask) | |||
1624 | asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3, | 1700 | asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3, |
1625 | unsigned long arg4, unsigned long arg5) | 1701 | unsigned long arg4, unsigned long arg5) |
1626 | { | 1702 | { |
1627 | long error = 0; | 1703 | struct task_struct *me = current; |
1704 | unsigned char comm[sizeof(me->comm)]; | ||
1705 | long error; | ||
1628 | 1706 | ||
1629 | if (security_task_prctl(option, arg2, arg3, arg4, arg5, &error)) | 1707 | error = security_task_prctl(option, arg2, arg3, arg4, arg5); |
1708 | if (error != -ENOSYS) | ||
1630 | return error; | 1709 | return error; |
1631 | 1710 | ||
1711 | error = 0; | ||
1632 | switch (option) { | 1712 | switch (option) { |
1633 | case PR_SET_PDEATHSIG: | 1713 | case PR_SET_PDEATHSIG: |
1634 | if (!valid_signal(arg2)) { | 1714 | if (!valid_signal(arg2)) { |
1635 | error = -EINVAL; | 1715 | error = -EINVAL; |
1636 | break; | 1716 | break; |
1637 | } | 1717 | } |
1638 | current->pdeath_signal = arg2; | 1718 | me->pdeath_signal = arg2; |
1719 | error = 0; | ||
1639 | break; | 1720 | break; |
1640 | case PR_GET_PDEATHSIG: | 1721 | case PR_GET_PDEATHSIG: |
1641 | error = put_user(current->pdeath_signal, (int __user *)arg2); | 1722 | error = put_user(me->pdeath_signal, (int __user *)arg2); |
1642 | break; | 1723 | break; |
1643 | case PR_GET_DUMPABLE: | 1724 | case PR_GET_DUMPABLE: |
1644 | error = get_dumpable(current->mm); | 1725 | error = get_dumpable(me->mm); |
1645 | break; | 1726 | break; |
1646 | case PR_SET_DUMPABLE: | 1727 | case PR_SET_DUMPABLE: |
1647 | if (arg2 < 0 || arg2 > 1) { | 1728 | if (arg2 < 0 || arg2 > 1) { |
1648 | error = -EINVAL; | 1729 | error = -EINVAL; |
1649 | break; | 1730 | break; |
1650 | } | 1731 | } |
1651 | set_dumpable(current->mm, arg2); | 1732 | set_dumpable(me->mm, arg2); |
1733 | error = 0; | ||
1652 | break; | 1734 | break; |
1653 | 1735 | ||
1654 | case PR_SET_UNALIGN: | 1736 | case PR_SET_UNALIGN: |
1655 | error = SET_UNALIGN_CTL(current, arg2); | 1737 | error = SET_UNALIGN_CTL(me, arg2); |
1656 | break; | 1738 | break; |
1657 | case PR_GET_UNALIGN: | 1739 | case PR_GET_UNALIGN: |
1658 | error = GET_UNALIGN_CTL(current, arg2); | 1740 | error = GET_UNALIGN_CTL(me, arg2); |
1659 | break; | 1741 | break; |
1660 | case PR_SET_FPEMU: | 1742 | case PR_SET_FPEMU: |
1661 | error = SET_FPEMU_CTL(current, arg2); | 1743 | error = SET_FPEMU_CTL(me, arg2); |
1662 | break; | 1744 | break; |
1663 | case PR_GET_FPEMU: | 1745 | case PR_GET_FPEMU: |
1664 | error = GET_FPEMU_CTL(current, arg2); | 1746 | error = GET_FPEMU_CTL(me, arg2); |
1665 | break; | 1747 | break; |
1666 | case PR_SET_FPEXC: | 1748 | case PR_SET_FPEXC: |
1667 | error = SET_FPEXC_CTL(current, arg2); | 1749 | error = SET_FPEXC_CTL(me, arg2); |
1668 | break; | 1750 | break; |
1669 | case PR_GET_FPEXC: | 1751 | case PR_GET_FPEXC: |
1670 | error = GET_FPEXC_CTL(current, arg2); | 1752 | error = GET_FPEXC_CTL(me, arg2); |
1671 | break; | 1753 | break; |
1672 | case PR_GET_TIMING: | 1754 | case PR_GET_TIMING: |
1673 | error = PR_TIMING_STATISTICAL; | 1755 | error = PR_TIMING_STATISTICAL; |
@@ -1675,33 +1757,28 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3, | |||
1675 | case PR_SET_TIMING: | 1757 | case PR_SET_TIMING: |
1676 | if (arg2 != PR_TIMING_STATISTICAL) | 1758 | if (arg2 != PR_TIMING_STATISTICAL) |
1677 | error = -EINVAL; | 1759 | error = -EINVAL; |
1760 | else | ||
1761 | error = 0; | ||
1678 | break; | 1762 | break; |
1679 | 1763 | ||
1680 | case PR_SET_NAME: { | 1764 | case PR_SET_NAME: |
1681 | struct task_struct *me = current; | 1765 | comm[sizeof(me->comm)-1] = 0; |
1682 | unsigned char ncomm[sizeof(me->comm)]; | 1766 | if (strncpy_from_user(comm, (char __user *)arg2, |
1683 | 1767 | sizeof(me->comm) - 1) < 0) | |
1684 | ncomm[sizeof(me->comm)-1] = 0; | ||
1685 | if (strncpy_from_user(ncomm, (char __user *)arg2, | ||
1686 | sizeof(me->comm)-1) < 0) | ||
1687 | return -EFAULT; | 1768 | return -EFAULT; |
1688 | set_task_comm(me, ncomm); | 1769 | set_task_comm(me, comm); |
1689 | return 0; | 1770 | return 0; |
1690 | } | 1771 | case PR_GET_NAME: |
1691 | case PR_GET_NAME: { | 1772 | get_task_comm(comm, me); |
1692 | struct task_struct *me = current; | 1773 | if (copy_to_user((char __user *)arg2, comm, |
1693 | unsigned char tcomm[sizeof(me->comm)]; | 1774 | sizeof(comm))) |
1694 | |||
1695 | get_task_comm(tcomm, me); | ||
1696 | if (copy_to_user((char __user *)arg2, tcomm, sizeof(tcomm))) | ||
1697 | return -EFAULT; | 1775 | return -EFAULT; |
1698 | return 0; | 1776 | return 0; |
1699 | } | ||
1700 | case PR_GET_ENDIAN: | 1777 | case PR_GET_ENDIAN: |
1701 | error = GET_ENDIAN(current, arg2); | 1778 | error = GET_ENDIAN(me, arg2); |
1702 | break; | 1779 | break; |
1703 | case PR_SET_ENDIAN: | 1780 | case PR_SET_ENDIAN: |
1704 | error = SET_ENDIAN(current, arg2); | 1781 | error = SET_ENDIAN(me, arg2); |
1705 | break; | 1782 | break; |
1706 | 1783 | ||
1707 | case PR_GET_SECCOMP: | 1784 | case PR_GET_SECCOMP: |
@@ -1725,6 +1802,7 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3, | |||
1725 | current->default_timer_slack_ns; | 1802 | current->default_timer_slack_ns; |
1726 | else | 1803 | else |
1727 | current->timer_slack_ns = arg2; | 1804 | current->timer_slack_ns = arg2; |
1805 | error = 0; | ||
1728 | break; | 1806 | break; |
1729 | default: | 1807 | default: |
1730 | error = -EINVAL; | 1808 | error = -EINVAL; |
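For reference, the PR_SET_NAME/PR_GET_NAME paths reworked above are driven from userspace roughly as follows (a sketch; the 16-byte buffer mirrors TASK_COMM_LEN, and error handling is elided):

	#include <stdio.h>
	#include <sys/prctl.h>

	int main(void)
	{
		char comm[16];				/* TASK_COMM_LEN */

		prctl(PR_SET_NAME, "example-task");	/* truncated to 15 chars + NUL */
		prctl(PR_GET_NAME, comm);		/* kernel copies the full 16-byte buffer back */
		printf("%s\n", comm);
		return 0;
	}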
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 6ac501a2dcc6..0b627d9c93d8 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
@@ -1671,7 +1671,7 @@ out: | |||
1671 | 1671 | ||
1672 | static int test_perm(int mode, int op) | 1672 | static int test_perm(int mode, int op) |
1673 | { | 1673 | { |
1674 | if (!current->euid) | 1674 | if (!current_euid()) |
1675 | mode >>= 6; | 1675 | mode >>= 6; |
1676 | else if (in_egroup_p(0)) | 1676 | else if (in_egroup_p(0)) |
1677 | mode >>= 3; | 1677 | mode >>= 3; |
diff --git a/kernel/timer.c b/kernel/timer.c index dbd50fabe4c7..566257d1dc10 100644 --- a/kernel/timer.c +++ b/kernel/timer.c | |||
@@ -1192,25 +1192,25 @@ asmlinkage long sys_getppid(void) | |||
1192 | asmlinkage long sys_getuid(void) | 1192 | asmlinkage long sys_getuid(void) |
1193 | { | 1193 | { |
1194 | /* Only we change this so SMP safe */ | 1194 | /* Only we change this so SMP safe */ |
1195 | return current->uid; | 1195 | return current_uid(); |
1196 | } | 1196 | } |
1197 | 1197 | ||
1198 | asmlinkage long sys_geteuid(void) | 1198 | asmlinkage long sys_geteuid(void) |
1199 | { | 1199 | { |
1200 | /* Only we change this so SMP safe */ | 1200 | /* Only we change this so SMP safe */ |
1201 | return current->euid; | 1201 | return current_euid(); |
1202 | } | 1202 | } |
1203 | 1203 | ||
1204 | asmlinkage long sys_getgid(void) | 1204 | asmlinkage long sys_getgid(void) |
1205 | { | 1205 | { |
1206 | /* Only we change this so SMP safe */ | 1206 | /* Only we change this so SMP safe */ |
1207 | return current->gid; | 1207 | return current_gid(); |
1208 | } | 1208 | } |
1209 | 1209 | ||
1210 | asmlinkage long sys_getegid(void) | 1210 | asmlinkage long sys_getegid(void) |
1211 | { | 1211 | { |
1212 | /* Only we change this so SMP safe */ | 1212 | /* Only we change this so SMP safe */ |
1213 | return current->egid; | 1213 | return current_egid(); |
1214 | } | 1214 | } |
1215 | 1215 | ||
1216 | #endif | 1216 | #endif |
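The current_uid()/current_euid()/current_gid()/current_egid() helpers used above are thin accessors over the task's subjective credentials. Roughly equivalent definitions, for illustration only (the real macros live in <linux/cred.h> as introduced by this series):

	#define current_uid()	(current_cred()->uid)
	#define current_euid()	(current_cred()->euid)
	#define current_gid()	(current_cred()->gid)
	#define current_egid()	(current_cred()->egid)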
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 79db26e8216e..f4bb3800318b 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -321,7 +321,7 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) | |||
321 | 321 | ||
322 | memcpy(data->comm, tsk->comm, TASK_COMM_LEN); | 322 | memcpy(data->comm, tsk->comm, TASK_COMM_LEN); |
323 | data->pid = tsk->pid; | 323 | data->pid = tsk->pid; |
324 | data->uid = tsk->uid; | 324 | data->uid = task_uid(tsk); |
325 | data->nice = tsk->static_prio - 20 - MAX_RT_PRIO; | 325 | data->nice = tsk->static_prio - 20 - MAX_RT_PRIO; |
326 | data->policy = tsk->policy; | 326 | data->policy = tsk->policy; |
327 | data->rt_priority = tsk->rt_priority; | 327 | data->rt_priority = tsk->rt_priority; |
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index 4bf39fcae97a..bc7d90850be5 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c | |||
@@ -592,6 +592,12 @@ print_graph_comment(struct print_entry *trace, struct trace_seq *s, | |||
592 | if (ent->flags & TRACE_FLAG_CONT) | 592 | if (ent->flags & TRACE_FLAG_CONT) |
593 | trace_seq_print_cont(s, iter); | 593 | trace_seq_print_cont(s, iter); |
594 | 594 | ||
595 | /* Strip ending newline */ | ||
596 | if (s->buffer[s->len - 1] == '\n') { | ||
597 | s->buffer[s->len - 1] = '\0'; | ||
598 | s->len--; | ||
599 | } | ||
600 | |||
595 | ret = trace_seq_printf(s, " */\n"); | 601 | ret = trace_seq_printf(s, " */\n"); |
596 | if (!ret) | 602 | if (!ret) |
597 | return TRACE_TYPE_PARTIAL_LINE; | 603 | return TRACE_TYPE_PARTIAL_LINE; |
diff --git a/kernel/tsacct.c b/kernel/tsacct.c index 8ebcd8532dfb..2dc06ab35716 100644 --- a/kernel/tsacct.c +++ b/kernel/tsacct.c | |||
@@ -27,6 +27,7 @@ | |||
27 | */ | 27 | */ |
28 | void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk) | 28 | void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk) |
29 | { | 29 | { |
30 | const struct cred *tcred; | ||
30 | struct timespec uptime, ts; | 31 | struct timespec uptime, ts; |
31 | u64 ac_etime; | 32 | u64 ac_etime; |
32 | 33 | ||
@@ -53,10 +54,11 @@ void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk) | |||
53 | stats->ac_flag |= AXSIG; | 54 | stats->ac_flag |= AXSIG; |
54 | stats->ac_nice = task_nice(tsk); | 55 | stats->ac_nice = task_nice(tsk); |
55 | stats->ac_sched = tsk->policy; | 56 | stats->ac_sched = tsk->policy; |
56 | stats->ac_uid = tsk->uid; | ||
57 | stats->ac_gid = tsk->gid; | ||
58 | stats->ac_pid = tsk->pid; | 57 | stats->ac_pid = tsk->pid; |
59 | rcu_read_lock(); | 58 | rcu_read_lock(); |
59 | tcred = __task_cred(tsk); | ||
60 | stats->ac_uid = tcred->uid; | ||
61 | stats->ac_gid = tcred->gid; | ||
60 | stats->ac_ppid = pid_alive(tsk) ? | 62 | stats->ac_ppid = pid_alive(tsk) ? |
61 | rcu_dereference(tsk->real_parent)->tgid : 0; | 63 | rcu_dereference(tsk->real_parent)->tgid : 0; |
62 | rcu_read_unlock(); | 64 | rcu_read_unlock(); |
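The rcu_read_lock()/__task_cred() pairing above is the required pattern whenever another task's credentials are inspected; task_uid() in the earlier trace change is a convenience wrapper around the same idea. A sketch, with the function name hypothetical:

	static uid_t example_task_uid(struct task_struct *tsk)
	{
		const struct cred *tcred;
		uid_t uid;

		rcu_read_lock();		/* __task_cred() is only valid under RCU */
		tcred = __task_cred(tsk);
		uid = tcred->uid;
		rcu_read_unlock();

		return uid;
	}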
diff --git a/kernel/uid16.c b/kernel/uid16.c index 3e41c1673e2f..2460c3199b5a 100644 --- a/kernel/uid16.c +++ b/kernel/uid16.c | |||
@@ -84,11 +84,12 @@ asmlinkage long sys_setresuid16(old_uid_t ruid, old_uid_t euid, old_uid_t suid) | |||
84 | 84 | ||
85 | asmlinkage long sys_getresuid16(old_uid_t __user *ruid, old_uid_t __user *euid, old_uid_t __user *suid) | 85 | asmlinkage long sys_getresuid16(old_uid_t __user *ruid, old_uid_t __user *euid, old_uid_t __user *suid) |
86 | { | 86 | { |
87 | const struct cred *cred = current_cred(); | ||
87 | int retval; | 88 | int retval; |
88 | 89 | ||
89 | if (!(retval = put_user(high2lowuid(current->uid), ruid)) && | 90 | if (!(retval = put_user(high2lowuid(cred->uid), ruid)) && |
90 | !(retval = put_user(high2lowuid(current->euid), euid))) | 91 | !(retval = put_user(high2lowuid(cred->euid), euid))) |
91 | retval = put_user(high2lowuid(current->suid), suid); | 92 | retval = put_user(high2lowuid(cred->suid), suid); |
92 | 93 | ||
93 | return retval; | 94 | return retval; |
94 | } | 95 | } |
@@ -104,11 +105,12 @@ asmlinkage long sys_setresgid16(old_gid_t rgid, old_gid_t egid, old_gid_t sgid) | |||
104 | 105 | ||
105 | asmlinkage long sys_getresgid16(old_gid_t __user *rgid, old_gid_t __user *egid, old_gid_t __user *sgid) | 106 | asmlinkage long sys_getresgid16(old_gid_t __user *rgid, old_gid_t __user *egid, old_gid_t __user *sgid) |
106 | { | 107 | { |
108 | const struct cred *cred = current_cred(); | ||
107 | int retval; | 109 | int retval; |
108 | 110 | ||
109 | if (!(retval = put_user(high2lowgid(current->gid), rgid)) && | 111 | if (!(retval = put_user(high2lowgid(cred->gid), rgid)) && |
110 | !(retval = put_user(high2lowgid(current->egid), egid))) | 112 | !(retval = put_user(high2lowgid(cred->egid), egid))) |
111 | retval = put_user(high2lowgid(current->sgid), sgid); | 113 | retval = put_user(high2lowgid(cred->sgid), sgid); |
112 | 114 | ||
113 | return retval; | 115 | return retval; |
114 | } | 116 | } |
@@ -161,25 +163,24 @@ static int groups16_from_user(struct group_info *group_info, | |||
161 | 163 | ||
162 | asmlinkage long sys_getgroups16(int gidsetsize, old_gid_t __user *grouplist) | 164 | asmlinkage long sys_getgroups16(int gidsetsize, old_gid_t __user *grouplist) |
163 | { | 165 | { |
164 | int i = 0; | 166 | const struct cred *cred = current_cred(); |
167 | int i; | ||
165 | 168 | ||
166 | if (gidsetsize < 0) | 169 | if (gidsetsize < 0) |
167 | return -EINVAL; | 170 | return -EINVAL; |
168 | 171 | ||
169 | get_group_info(current->group_info); | 172 | i = cred->group_info->ngroups; |
170 | i = current->group_info->ngroups; | ||
171 | if (gidsetsize) { | 173 | if (gidsetsize) { |
172 | if (i > gidsetsize) { | 174 | if (i > gidsetsize) { |
173 | i = -EINVAL; | 175 | i = -EINVAL; |
174 | goto out; | 176 | goto out; |
175 | } | 177 | } |
176 | if (groups16_to_user(grouplist, current->group_info)) { | 178 | if (groups16_to_user(grouplist, cred->group_info)) { |
177 | i = -EFAULT; | 179 | i = -EFAULT; |
178 | goto out; | 180 | goto out; |
179 | } | 181 | } |
180 | } | 182 | } |
181 | out: | 183 | out: |
182 | put_group_info(current->group_info); | ||
183 | return i; | 184 | return i; |
184 | } | 185 | } |
185 | 186 | ||
@@ -210,20 +211,20 @@ asmlinkage long sys_setgroups16(int gidsetsize, old_gid_t __user *grouplist) | |||
210 | 211 | ||
211 | asmlinkage long sys_getuid16(void) | 212 | asmlinkage long sys_getuid16(void) |
212 | { | 213 | { |
213 | return high2lowuid(current->uid); | 214 | return high2lowuid(current_uid()); |
214 | } | 215 | } |
215 | 216 | ||
216 | asmlinkage long sys_geteuid16(void) | 217 | asmlinkage long sys_geteuid16(void) |
217 | { | 218 | { |
218 | return high2lowuid(current->euid); | 219 | return high2lowuid(current_euid()); |
219 | } | 220 | } |
220 | 221 | ||
221 | asmlinkage long sys_getgid16(void) | 222 | asmlinkage long sys_getgid16(void) |
222 | { | 223 | { |
223 | return high2lowgid(current->gid); | 224 | return high2lowgid(current_gid()); |
224 | } | 225 | } |
225 | 226 | ||
226 | asmlinkage long sys_getegid16(void) | 227 | asmlinkage long sys_getegid16(void) |
227 | { | 228 | { |
228 | return high2lowgid(current->egid); | 229 | return high2lowgid(current_egid()); |
229 | } | 230 | } |
diff --git a/kernel/user.c b/kernel/user.c index 39d6159fae43..477b6660f447 100644 --- a/kernel/user.c +++ b/kernel/user.c | |||
@@ -16,12 +16,13 @@ | |||
16 | #include <linux/interrupt.h> | 16 | #include <linux/interrupt.h> |
17 | #include <linux/module.h> | 17 | #include <linux/module.h> |
18 | #include <linux/user_namespace.h> | 18 | #include <linux/user_namespace.h> |
19 | #include "cred-internals.h" | ||
19 | 20 | ||
20 | struct user_namespace init_user_ns = { | 21 | struct user_namespace init_user_ns = { |
21 | .kref = { | 22 | .kref = { |
22 | .refcount = ATOMIC_INIT(2), | 23 | .refcount = ATOMIC_INIT(1), |
23 | }, | 24 | }, |
24 | .root_user = &root_user, | 25 | .creator = &root_user, |
25 | }; | 26 | }; |
26 | EXPORT_SYMBOL_GPL(init_user_ns); | 27 | EXPORT_SYMBOL_GPL(init_user_ns); |
27 | 28 | ||
@@ -47,12 +48,14 @@ static struct kmem_cache *uid_cachep; | |||
47 | */ | 48 | */ |
48 | static DEFINE_SPINLOCK(uidhash_lock); | 49 | static DEFINE_SPINLOCK(uidhash_lock); |
49 | 50 | ||
51 | /* root_user.__count is 2, 1 for init task cred, 1 for init_user_ns->creator */ | ||
50 | struct user_struct root_user = { | 52 | struct user_struct root_user = { |
51 | .__count = ATOMIC_INIT(1), | 53 | .__count = ATOMIC_INIT(2), |
52 | .processes = ATOMIC_INIT(1), | 54 | .processes = ATOMIC_INIT(1), |
53 | .files = ATOMIC_INIT(0), | 55 | .files = ATOMIC_INIT(0), |
54 | .sigpending = ATOMIC_INIT(0), | 56 | .sigpending = ATOMIC_INIT(0), |
55 | .locked_shm = 0, | 57 | .locked_shm = 0, |
58 | .user_ns = &init_user_ns, | ||
56 | #ifdef CONFIG_USER_SCHED | 59 | #ifdef CONFIG_USER_SCHED |
57 | .tg = &init_task_group, | 60 | .tg = &init_task_group, |
58 | #endif | 61 | #endif |
@@ -101,19 +104,15 @@ static int sched_create_user(struct user_struct *up) | |||
101 | if (IS_ERR(up->tg)) | 104 | if (IS_ERR(up->tg)) |
102 | rc = -ENOMEM; | 105 | rc = -ENOMEM; |
103 | 106 | ||
104 | return rc; | 107 | set_tg_uid(up); |
105 | } | ||
106 | 108 | ||
107 | static void sched_switch_user(struct task_struct *p) | 109 | return rc; |
108 | { | ||
109 | sched_move_task(p); | ||
110 | } | 110 | } |
111 | 111 | ||
112 | #else /* CONFIG_USER_SCHED */ | 112 | #else /* CONFIG_USER_SCHED */ |
113 | 113 | ||
114 | static void sched_destroy_user(struct user_struct *up) { } | 114 | static void sched_destroy_user(struct user_struct *up) { } |
115 | static int sched_create_user(struct user_struct *up) { return 0; } | 115 | static int sched_create_user(struct user_struct *up) { return 0; } |
116 | static void sched_switch_user(struct task_struct *p) { } | ||
117 | 116 | ||
118 | #endif /* CONFIG_USER_SCHED */ | 117 | #endif /* CONFIG_USER_SCHED */ |
119 | 118 | ||
@@ -242,13 +241,21 @@ static struct kobj_type uids_ktype = { | |||
242 | .release = uids_release, | 241 | .release = uids_release, |
243 | }; | 242 | }; |
244 | 243 | ||
245 | /* create /sys/kernel/uids/<uid>/cpu_share file for this user */ | 244 | /* |
245 | * Create /sys/kernel/uids/<uid>/cpu_share file for this user | ||
246 | * We do not create this file for users in a user namespace (until | ||
247 | * sysfs tagging is implemented). | ||
248 | * | ||
249 | * See Documentation/scheduler/sched-design-CFS.txt for ramifications. | ||
250 | */ | ||
246 | static int uids_user_create(struct user_struct *up) | 251 | static int uids_user_create(struct user_struct *up) |
247 | { | 252 | { |
248 | struct kobject *kobj = &up->kobj; | 253 | struct kobject *kobj = &up->kobj; |
249 | int error; | 254 | int error; |
250 | 255 | ||
251 | memset(kobj, 0, sizeof(struct kobject)); | 256 | memset(kobj, 0, sizeof(struct kobject)); |
257 | if (up->user_ns != &init_user_ns) | ||
258 | return 0; | ||
252 | kobj->kset = uids_kset; | 259 | kobj->kset = uids_kset; |
253 | error = kobject_init_and_add(kobj, &uids_ktype, NULL, "%d", up->uid); | 260 | error = kobject_init_and_add(kobj, &uids_ktype, NULL, "%d", up->uid); |
254 | if (error) { | 261 | if (error) { |
@@ -284,6 +291,8 @@ static void remove_user_sysfs_dir(struct work_struct *w) | |||
284 | unsigned long flags; | 291 | unsigned long flags; |
285 | int remove_user = 0; | 292 | int remove_user = 0; |
286 | 293 | ||
294 | if (up->user_ns != &init_user_ns) | ||
295 | return; | ||
287 | /* Make uid_hash_remove() + sysfs_remove_file() + kobject_del() | 296 | /* Make uid_hash_remove() + sysfs_remove_file() + kobject_del() |
288 | * atomic. | 297 | * atomic. |
289 | */ | 298 | */ |
@@ -319,12 +328,13 @@ done: | |||
319 | * IRQ state (as stored in flags) is restored and uidhash_lock released | 328 | * IRQ state (as stored in flags) is restored and uidhash_lock released |
320 | * upon function exit. | 329 | * upon function exit. |
321 | */ | 330 | */ |
322 | static inline void free_user(struct user_struct *up, unsigned long flags) | 331 | static void free_user(struct user_struct *up, unsigned long flags) |
323 | { | 332 | { |
324 | /* restore back the count */ | 333 | /* restore back the count */ |
325 | atomic_inc(&up->__count); | 334 | atomic_inc(&up->__count); |
326 | spin_unlock_irqrestore(&uidhash_lock, flags); | 335 | spin_unlock_irqrestore(&uidhash_lock, flags); |
327 | 336 | ||
337 | put_user_ns(up->user_ns); | ||
328 | INIT_WORK(&up->work, remove_user_sysfs_dir); | 338 | INIT_WORK(&up->work, remove_user_sysfs_dir); |
329 | schedule_work(&up->work); | 339 | schedule_work(&up->work); |
330 | } | 340 | } |
@@ -340,13 +350,14 @@ static inline void uids_mutex_unlock(void) { } | |||
340 | * IRQ state (as stored in flags) is restored and uidhash_lock released | 350 | * IRQ state (as stored in flags) is restored and uidhash_lock released |
341 | * upon function exit. | 351 | * upon function exit. |
342 | */ | 352 | */ |
343 | static inline void free_user(struct user_struct *up, unsigned long flags) | 353 | static void free_user(struct user_struct *up, unsigned long flags) |
344 | { | 354 | { |
345 | uid_hash_remove(up); | 355 | uid_hash_remove(up); |
346 | spin_unlock_irqrestore(&uidhash_lock, flags); | 356 | spin_unlock_irqrestore(&uidhash_lock, flags); |
347 | sched_destroy_user(up); | 357 | sched_destroy_user(up); |
348 | key_put(up->uid_keyring); | 358 | key_put(up->uid_keyring); |
349 | key_put(up->session_keyring); | 359 | key_put(up->session_keyring); |
360 | put_user_ns(up->user_ns); | ||
350 | kmem_cache_free(uid_cachep, up); | 361 | kmem_cache_free(uid_cachep, up); |
351 | } | 362 | } |
352 | 363 | ||
@@ -362,7 +373,7 @@ struct user_struct *find_user(uid_t uid) | |||
362 | { | 373 | { |
363 | struct user_struct *ret; | 374 | struct user_struct *ret; |
364 | unsigned long flags; | 375 | unsigned long flags; |
365 | struct user_namespace *ns = current->nsproxy->user_ns; | 376 | struct user_namespace *ns = current_user_ns(); |
366 | 377 | ||
367 | spin_lock_irqsave(&uidhash_lock, flags); | 378 | spin_lock_irqsave(&uidhash_lock, flags); |
368 | ret = uid_hash_find(uid, uidhashentry(ns, uid)); | 379 | ret = uid_hash_find(uid, uidhashentry(ns, uid)); |
@@ -409,6 +420,8 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid) | |||
409 | if (sched_create_user(new) < 0) | 420 | if (sched_create_user(new) < 0) |
410 | goto out_free_user; | 421 | goto out_free_user; |
411 | 422 | ||
423 | new->user_ns = get_user_ns(ns); | ||
424 | |||
412 | if (uids_user_create(new)) | 425 | if (uids_user_create(new)) |
413 | goto out_destoy_sched; | 426 | goto out_destoy_sched; |
414 | 427 | ||
@@ -432,7 +445,6 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid) | |||
432 | up = new; | 445 | up = new; |
433 | } | 446 | } |
434 | spin_unlock_irq(&uidhash_lock); | 447 | spin_unlock_irq(&uidhash_lock); |
435 | |||
436 | } | 448 | } |
437 | 449 | ||
438 | uids_mutex_unlock(); | 450 | uids_mutex_unlock(); |
@@ -441,6 +453,7 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid) | |||
441 | 453 | ||
442 | out_destoy_sched: | 454 | out_destoy_sched: |
443 | sched_destroy_user(new); | 455 | sched_destroy_user(new); |
456 | put_user_ns(new->user_ns); | ||
444 | out_free_user: | 457 | out_free_user: |
445 | kmem_cache_free(uid_cachep, new); | 458 | kmem_cache_free(uid_cachep, new); |
446 | out_unlock: | 459 | out_unlock: |
@@ -448,63 +461,6 @@ out_unlock: | |||
448 | return NULL; | 461 | return NULL; |
449 | } | 462 | } |
450 | 463 | ||
451 | void switch_uid(struct user_struct *new_user) | ||
452 | { | ||
453 | struct user_struct *old_user; | ||
454 | |||
455 | /* What if a process setreuid()'s and this brings the | ||
456 | * new uid over his NPROC rlimit? We can check this now | ||
457 | * cheaply with the new uid cache, so if it matters | ||
458 | * we should be checking for it. -DaveM | ||
459 | */ | ||
460 | old_user = current->user; | ||
461 | atomic_inc(&new_user->processes); | ||
462 | atomic_dec(&old_user->processes); | ||
463 | switch_uid_keyring(new_user); | ||
464 | current->user = new_user; | ||
465 | sched_switch_user(current); | ||
466 | |||
467 | /* | ||
468 | * We need to synchronize with __sigqueue_alloc() | ||
469 | * doing a get_uid(p->user).. If that saw the old | ||
470 | * user value, we need to wait until it has exited | ||
471 | * its critical region before we can free the old | ||
472 | * structure. | ||
473 | */ | ||
474 | smp_mb(); | ||
475 | spin_unlock_wait(¤t->sighand->siglock); | ||
476 | |||
477 | free_uid(old_user); | ||
478 | suid_keys(current); | ||
479 | } | ||
480 | |||
481 | #ifdef CONFIG_USER_NS | ||
482 | void release_uids(struct user_namespace *ns) | ||
483 | { | ||
484 | int i; | ||
485 | unsigned long flags; | ||
486 | struct hlist_head *head; | ||
487 | struct hlist_node *nd; | ||
488 | |||
489 | spin_lock_irqsave(&uidhash_lock, flags); | ||
490 | /* | ||
491 | * collapse the chains so that the user_struct-s will | ||
492 | * be still alive, but not in hashes. subsequent free_uid() | ||
493 | * will free them. | ||
494 | */ | ||
495 | for (i = 0; i < UIDHASH_SZ; i++) { | ||
496 | head = ns->uidhash_table + i; | ||
497 | while (!hlist_empty(head)) { | ||
498 | nd = head->first; | ||
499 | hlist_del_init(nd); | ||
500 | } | ||
501 | } | ||
502 | spin_unlock_irqrestore(&uidhash_lock, flags); | ||
503 | |||
504 | free_uid(ns->root_user); | ||
505 | } | ||
506 | #endif | ||
507 | |||
508 | static int __init uid_cache_init(void) | 464 | static int __init uid_cache_init(void) |
509 | { | 465 | { |
510 | int n; | 466 | int n; |
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c index 532858fa5b88..79084311ee57 100644 --- a/kernel/user_namespace.c +++ b/kernel/user_namespace.c | |||
@@ -9,60 +9,55 @@ | |||
9 | #include <linux/nsproxy.h> | 9 | #include <linux/nsproxy.h> |
10 | #include <linux/slab.h> | 10 | #include <linux/slab.h> |
11 | #include <linux/user_namespace.h> | 11 | #include <linux/user_namespace.h> |
12 | #include <linux/cred.h> | ||
12 | 13 | ||
13 | /* | 14 | /* |
14 | * Clone a new ns copying an original user ns, setting refcount to 1 | 15 | * Create a new user namespace, deriving the creator from the user in the |
15 | * @old_ns: namespace to clone | 16 | * passed credentials, and replacing that user with the new root user for the |
16 | * Return NULL on error (failure to kmalloc), new ns otherwise | 17 | * new namespace. |
18 | * | ||
19 | * This is called by copy_creds(), which will finish setting the target task's | ||
20 | * credentials. | ||
17 | */ | 21 | */ |
18 | static struct user_namespace *clone_user_ns(struct user_namespace *old_ns) | 22 | int create_user_ns(struct cred *new) |
19 | { | 23 | { |
20 | struct user_namespace *ns; | 24 | struct user_namespace *ns; |
21 | struct user_struct *new_user; | 25 | struct user_struct *root_user; |
22 | int n; | 26 | int n; |
23 | 27 | ||
24 | ns = kmalloc(sizeof(struct user_namespace), GFP_KERNEL); | 28 | ns = kmalloc(sizeof(struct user_namespace), GFP_KERNEL); |
25 | if (!ns) | 29 | if (!ns) |
26 | return ERR_PTR(-ENOMEM); | 30 | return -ENOMEM; |
27 | 31 | ||
28 | kref_init(&ns->kref); | 32 | kref_init(&ns->kref); |
29 | 33 | ||
30 | for (n = 0; n < UIDHASH_SZ; ++n) | 34 | for (n = 0; n < UIDHASH_SZ; ++n) |
31 | INIT_HLIST_HEAD(ns->uidhash_table + n); | 35 | INIT_HLIST_HEAD(ns->uidhash_table + n); |
32 | 36 | ||
33 | /* Insert new root user. */ | 37 | /* Alloc new root user. */ |
34 | ns->root_user = alloc_uid(ns, 0); | 38 | root_user = alloc_uid(ns, 0); |
35 | if (!ns->root_user) { | 39 | if (!root_user) { |
36 | kfree(ns); | 40 | kfree(ns); |
37 | return ERR_PTR(-ENOMEM); | 41 | return -ENOMEM; |
38 | } | 42 | } |
39 | 43 | ||
40 | /* Reset current->user with a new one */ | 44 | /* set the new root user in the credentials under preparation */ |
41 | new_user = alloc_uid(ns, current->uid); | 45 | ns->creator = new->user; |
42 | if (!new_user) { | 46 | new->user = root_user; |
43 | free_uid(ns->root_user); | 47 | new->uid = new->euid = new->suid = new->fsuid = 0; |
44 | kfree(ns); | 48 | new->gid = new->egid = new->sgid = new->fsgid = 0; |
45 | return ERR_PTR(-ENOMEM); | 49 | put_group_info(new->group_info); |
46 | } | 50 | new->group_info = get_group_info(&init_groups); |
47 | 51 | #ifdef CONFIG_KEYS | |
48 | switch_uid(new_user); | 52 | key_put(new->request_key_auth); |
49 | return ns; | 53 | new->request_key_auth = NULL; |
50 | } | 54 | #endif |
51 | 55 | /* tgcred will be cleared in our caller bc CLONE_THREAD won't be set */ | |
52 | struct user_namespace * copy_user_ns(int flags, struct user_namespace *old_ns) | ||
53 | { | ||
54 | struct user_namespace *new_ns; | ||
55 | |||
56 | BUG_ON(!old_ns); | ||
57 | get_user_ns(old_ns); | ||
58 | |||
59 | if (!(flags & CLONE_NEWUSER)) | ||
60 | return old_ns; | ||
61 | 56 | ||
62 | new_ns = clone_user_ns(old_ns); | 57 | /* alloc_uid() incremented the userns refcount. Just set it to 1 */ |
58 | kref_set(&ns->kref, 1); | ||
63 | 59 | ||
64 | put_user_ns(old_ns); | 60 | return 0; |
65 | return new_ns; | ||
66 | } | 61 | } |
67 | 62 | ||
68 | void free_user_ns(struct kref *kref) | 63 | void free_user_ns(struct kref *kref) |
@@ -70,7 +65,7 @@ void free_user_ns(struct kref *kref) | |||
70 | struct user_namespace *ns; | 65 | struct user_namespace *ns; |
71 | 66 | ||
72 | ns = container_of(kref, struct user_namespace, kref); | 67 | ns = container_of(kref, struct user_namespace, kref); |
73 | release_uids(ns); | 68 | free_uid(ns->creator); |
74 | kfree(ns); | 69 | kfree(ns); |
75 | } | 70 | } |
76 | EXPORT_SYMBOL(free_user_ns); | 71 | EXPORT_SYMBOL(free_user_ns); |
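free_user_ns() above is the kref release callback; holders drop their namespace reference through put_user_ns(), roughly as sketched below (the actual inline lives in <linux/user_namespace.h>):

	static inline void put_user_ns(struct user_namespace *ns)
	{
		if (ns)
			kref_put(&ns->kref, free_user_ns);
	}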
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index d4dc69ddebd7..4952322cba45 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -84,21 +84,21 @@ static cpumask_t cpu_singlethread_map __read_mostly; | |||
84 | static cpumask_t cpu_populated_map __read_mostly; | 84 | static cpumask_t cpu_populated_map __read_mostly; |
85 | 85 | ||
86 | /* If it's single threaded, it isn't in the list of workqueues. */ | 86 | /* If it's single threaded, it isn't in the list of workqueues. */ |
87 | static inline int is_single_threaded(struct workqueue_struct *wq) | 87 | static inline int is_wq_single_threaded(struct workqueue_struct *wq) |
88 | { | 88 | { |
89 | return wq->singlethread; | 89 | return wq->singlethread; |
90 | } | 90 | } |
91 | 91 | ||
92 | static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq) | 92 | static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq) |
93 | { | 93 | { |
94 | return is_single_threaded(wq) | 94 | return is_wq_single_threaded(wq) |
95 | ? &cpu_singlethread_map : &cpu_populated_map; | 95 | ? &cpu_singlethread_map : &cpu_populated_map; |
96 | } | 96 | } |
97 | 97 | ||
98 | static | 98 | static |
99 | struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu) | 99 | struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu) |
100 | { | 100 | { |
101 | if (unlikely(is_single_threaded(wq))) | 101 | if (unlikely(is_wq_single_threaded(wq))) |
102 | cpu = singlethread_cpu; | 102 | cpu = singlethread_cpu; |
103 | return per_cpu_ptr(wq->cpu_wq, cpu); | 103 | return per_cpu_ptr(wq->cpu_wq, cpu); |
104 | } | 104 | } |
@@ -769,7 +769,7 @@ static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu) | |||
769 | { | 769 | { |
770 | struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; | 770 | struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; |
771 | struct workqueue_struct *wq = cwq->wq; | 771 | struct workqueue_struct *wq = cwq->wq; |
772 | const char *fmt = is_single_threaded(wq) ? "%s" : "%s/%d"; | 772 | const char *fmt = is_wq_single_threaded(wq) ? "%s" : "%s/%d"; |
773 | struct task_struct *p; | 773 | struct task_struct *p; |
774 | 774 | ||
775 | p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu); | 775 | p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu); |