author     Mark Brown <broonie@kernel.org>  2015-10-12 13:09:27 -0400
committer  Mark Brown <broonie@kernel.org>  2015-10-12 13:09:27 -0400
commit     79828b4fa835f73cdaf4bffa48696abdcbea9d02 (patch)
tree       5e0fa7156acb75ba603022bc807df8f2fedb97a8 /kernel/fork.c
parent     721b51fcf91898299d96f4b72cb9434cda29dce6 (diff)
parent     8c1a9d6323abf0fb1e5dad96cf3f1c783505ea5a (diff)
Merge remote-tracking branch 'asoc/fix/rt5645' into asoc-fix-rt5645
Diffstat (limited to 'kernel/fork.c')
 kernel/fork.c | 70
1 file changed, 49 insertions(+), 21 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index 1bfefc6f96a4..7d5f0f118a63 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -287,6 +287,11 @@ static void set_max_threads(unsigned int max_threads_suggested)
 	max_threads = clamp_t(u64, threads, MIN_THREADS, MAX_THREADS);
 }
 
+#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
+/* Initialized by the architecture: */
+int arch_task_struct_size __read_mostly;
+#endif
+
 void __init fork_init(void)
 {
 #ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
@@ -295,7 +300,7 @@ void __init fork_init(void)
 #endif
 	/* create a slab on which task_structs can be allocated */
 	task_struct_cachep =
-		kmem_cache_create("task_struct", sizeof(struct task_struct),
+		kmem_cache_create("task_struct", arch_task_struct_size,
 			ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
 #endif
 
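The slab cache above is now sized from a per-architecture variable instead of a compile-time constant, so an architecture can append per-task state (e.g. a variable-size FPU buffer) at boot, before fork_init() runs. A minimal sketch of the presumed header wiring; this fragment is an assumption, not part of this diff:

/* include/linux/sched.h (assumed sketch): archs that opt in set the
 * size at boot; everyone else keeps the compile-time size, so the
 * kmem_cache_create() call above is unchanged for them. */
#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
extern int arch_task_struct_size __read_mostly;
#else
# define arch_task_struct_size (sizeof(struct task_struct))
#endif
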
@@ -449,8 +454,9 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 		tmp->vm_mm = mm;
 		if (anon_vma_fork(tmp, mpnt))
 			goto fail_nomem_anon_vma_fork;
-		tmp->vm_flags &= ~VM_LOCKED;
+		tmp->vm_flags &= ~(VM_LOCKED|VM_UFFD_MISSING|VM_UFFD_WP);
 		tmp->vm_next = tmp->vm_prev = NULL;
+		tmp->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
 		file = tmp->vm_file;
 		if (file) {
 			struct inode *inode = file_inode(file);
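The two added lines make fork() drop any userfaultfd registration: the VM_UFFD_* bits are cleared and the copied VMA gets an empty context, so the child starts with no userfault tracking. A sketch of the presumed shape of NULL_VM_UFFD_CTX, reconstructed from the userfaultfd series as an assumption (not shown in this diff):

/* include/linux/mm_types.h (assumed sketch): the empty initializer
 * detaches a vma from any userfaultfd file; the !CONFIG_USERFAULTFD
 * variant is a zero-size struct so the assignment compiles away. */
#ifdef CONFIG_USERFAULTFD
struct vm_userfaultfd_ctx {
	struct userfaultfd_ctx *ctx;
};
# define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) { NULL, })
#else
struct vm_userfaultfd_ctx {};
# define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) {})
#endif
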
@@ -1067,6 +1073,7 @@ static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
 	rcu_assign_pointer(tsk->sighand, sig);
 	if (!sig)
 		return -ENOMEM;
+
 	atomic_set(&sig->count, 1);
 	memcpy(sig->action, current->sighand->action, sizeof(sig->action));
 	return 0;
@@ -1128,6 +1135,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 	init_sigpending(&sig->shared_pending);
 	INIT_LIST_HEAD(&sig->posix_timers);
 	seqlock_init(&sig->stats_lock);
+	prev_cputime_init(&sig->prev_cputime);
 
 	hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	sig->real_timer.function = it_real_fn;
@@ -1239,6 +1247,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 {
 	int retval;
 	struct task_struct *p;
+	void *cgrp_ss_priv[CGROUP_CANFORK_COUNT] = {};
 
 	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
 		return ERR_PTR(-EINVAL);
@@ -1273,10 +1282,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 
 	/*
 	 * If the new process will be in a different pid or user namespace
-	 * do not allow it to share a thread group or signal handlers or
-	 * parent with the forking task.
+	 * do not allow it to share a thread group with the forking task.
 	 */
-	if (clone_flags & CLONE_SIGHAND) {
+	if (clone_flags & CLONE_THREAD) {
 		if ((clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) ||
 		    (task_active_pid_ns(current) !=
 			current->nsproxy->pid_ns_for_children))
@@ -1335,9 +1343,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 
 	p->utime = p->stime = p->gtime = 0;
 	p->utimescaled = p->stimescaled = 0;
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-	p->prev_cputime.utime = p->prev_cputime.stime = 0;
-#endif
+	prev_cputime_init(&p->prev_cputime);
+
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
 	seqlock_init(&p->vtime_seqlock);
 	p->vtime_snap = 0;
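This hunk and the copy_signal() hunk above replace the open-coded #ifndef block with one helper. Its presumed definition, reconstructed from the accompanying cputime changes (an assumption, not part of this diff):

/* include/linux/sched.h (assumed sketch): zero the snapshot used to
 * keep reported utime/stime monotonic; compiles to nothing under
 * CONFIG_VIRT_CPU_ACCOUNTING_NATIVE, where exact times are kept. */
static inline void prev_cputime_init(struct prev_cputime *prev)
{
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	prev->utime = prev->stime = 0;
	raw_spin_lock_init(&prev->lock);
#endif
}
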
@@ -1513,6 +1520,16 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	p->task_works = NULL;
 
 	/*
+	 * Ensure that the cgroup subsystem policies allow the new process to be
+	 * forked. It should be noted the the new process's css_set can be changed
+	 * between here and cgroup_post_fork() if an organisation operation is in
+	 * progress.
+	 */
+	retval = cgroup_can_fork(p, cgrp_ss_priv);
+	if (retval)
+		goto bad_fork_free_pid;
+
+	/*
 	 * Make it visible to the rest of the system, but dont wake it up yet.
 	 * Need tasklist lock for parent etc handling!
 	 */
@@ -1548,7 +1565,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 		spin_unlock(&current->sighand->siglock);
 		write_unlock_irq(&tasklist_lock);
 		retval = -ERESTARTNOINTR;
-		goto bad_fork_free_pid;
+		goto bad_fork_cancel_cgroup;
 	}
 
 	if (likely(p->pid)) {
@@ -1590,7 +1607,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	write_unlock_irq(&tasklist_lock);
 
 	proc_fork_connector(p);
-	cgroup_post_fork(p);
+	cgroup_post_fork(p, cgrp_ss_priv);
 	if (clone_flags & CLONE_THREAD)
 		threadgroup_change_end(current);
 	perf_event_fork(p);
@@ -1600,6 +1617,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 
 	return p;
 
+bad_fork_cancel_cgroup:
+	cgroup_cancel_fork(p, cgrp_ss_priv);
 bad_fork_free_pid:
 	if (pid != &init_struct_pid)
 		free_pid(pid);
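Taken together, the copy_process() hunks form a prepare/commit/cancel protocol: cgroup_can_fork() may veto the fork and stash per-controller state in cgrp_ss_priv, cgroup_post_fork() commits it, and the new bad_fork_cancel_cgroup label rolls back a successful prepare when a later step fails. A self-contained toy illustration of that shape (plain C, not kernel code):

#include <stdio.h>

/* Toy stand-ins for cgroup_can_fork()/post_fork()/cancel_fork(). */
static int  can_fork(void)    { return 0; }           /* 0 = allowed */
static void post_fork(void)   { puts("committed"); }
static void cancel_fork(void) { puts("rolled back"); }

static int toy_fork(int later_step_fails)
{
	if (can_fork())
		return -1;		/* vetoed: nothing to undo */
	if (later_step_fails) {
		cancel_fork();		/* undo the successful prepare */
		return -1;
	}
	post_fork();			/* commit: visible to the system */
	return 0;
}

int main(void)
{
	toy_fork(0);			/* prints "committed" */
	toy_fork(1);			/* prints "rolled back" */
	return 0;
}
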
@@ -1866,13 +1885,21 @@ static int check_unshare_flags(unsigned long unshare_flags)
 				CLONE_NEWUSER|CLONE_NEWPID))
 		return -EINVAL;
 	/*
-	 * Not implemented, but pretend it works if there is nothing to
-	 * unshare. Note that unsharing CLONE_THREAD or CLONE_SIGHAND
-	 * needs to unshare vm.
+	 * Not implemented, but pretend it works if there is nothing
+	 * to unshare. Note that unsharing the address space or the
+	 * signal handlers also need to unshare the signal queues (aka
+	 * CLONE_THREAD).
 	 */
 	if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) {
-		/* FIXME: get_task_mm() increments ->mm_users */
-		if (atomic_read(&current->mm->mm_users) > 1)
+		if (!thread_group_empty(current))
+			return -EINVAL;
+	}
+	if (unshare_flags & (CLONE_SIGHAND | CLONE_VM)) {
+		if (atomic_read(&current->sighand->count) > 1)
+			return -EINVAL;
+	}
+	if (unshare_flags & CLONE_VM) {
+		if (!current_is_single_threaded())
 			return -EINVAL;
 	}
 
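For userspace the net effect is that unshare(2) now applies these checks consistently: a caller that actually shares its thread group, signal handlers, or address space gets -EINVAL, while a plain single-threaded process still gets the "pretend it works" success. A hypothetical probe (userspace, not part of the patch):

#define _GNU_SOURCE
#include <sched.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Single-threaded, sighand count 1: all three checks pass and
	 * the call "pretends" to succeed. Spawn a second thread first
	 * and the same call fails with EINVAL. */
	if (unshare(CLONE_VM) == 0)
		puts("unshare(CLONE_VM): pretended to work");
	else
		printf("unshare(CLONE_VM): %s\n", strerror(errno));
	return 0;
}
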
@@ -1936,21 +1963,22 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
 	int err;
 
 	/*
-	 * If unsharing a user namespace must also unshare the thread.
+	 * If unsharing a user namespace must also unshare the thread group
+	 * and unshare the filesystem root and working directories.
 	 */
 	if (unshare_flags & CLONE_NEWUSER)
 		unshare_flags |= CLONE_THREAD | CLONE_FS;
 	/*
-	 * If unsharing a thread from a thread group, must also unshare vm.
-	 */
-	if (unshare_flags & CLONE_THREAD)
-		unshare_flags |= CLONE_VM;
-	/*
 	 * If unsharing vm, must also unshare signal handlers.
 	 */
 	if (unshare_flags & CLONE_VM)
 		unshare_flags |= CLONE_SIGHAND;
 	/*
+	 * If unsharing a signal handlers, must also unshare the signal queues.
+	 */
+	if (unshare_flags & CLONE_SIGHAND)
+		unshare_flags |= CLONE_THREAD;
+	/*
 	 * If unsharing namespace, must also unshare filesystem information.
 	 */
 	if (unshare_flags & CLONE_NEWNS)