aboutsummaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
authorEric W. Biederman <ebiederm@xmission.com>2018-07-23 14:38:00 -0400
committerEric W. Biederman <ebiederm@xmission.com>2018-08-03 21:20:14 -0400
commit924de3b8c9410c404c6eda7abffd282b97b3ff7f (patch)
tree6bcc1ed1e1a9268ad6ce8c899d9e728e769277fb /kernel
parent4390e9eadbbb6774b7ba03fde0a0fdf3f07db4cd (diff)
fork: Have new threads join on-going signal group stops
There are only two signals that are delivered to every member of a signal group: SIGSTOP and SIGKILL. Signal delivery requires every signal appear to be delivered either before or after a clone syscall. SIGKILL terminates the clone so does not need to be considered. Which leaves only SIGSTOP that needs to be considered when creating new threads. Today in the event of a group stop TIF_SIGPENDING will get set and the fork will restart ensuring the fork syscall participates in the group stop. A fork (especially of a process with a lot of memory) is one of the most expensive system calls, so we really only want to restart a fork when necessary. It is easy to check whether a SIGSTOP is ongoing and have the new thread join it immediately after the clone completes, making it appear that the clone happened just before the SIGSTOP. The calculate_sigpending function will see the bits set in jobctl and set TIF_SIGPENDING to ensure the new task takes the slow path to userspace. V2: The call to task_join_group_stop was moved before the new task is added to the thread group list. This should not matter as sighand->siglock is held over both the addition of the threads, the call to task_join_group_stop and do_signal_stop. But the change is trivial and it is one less thing to worry about when reading the code. Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/fork.c27
-rw-r--r--kernel/signal.c14
2 files changed, 29 insertions, 12 deletions
diff --git a/kernel/fork.c b/kernel/fork.c
index 22d4cdb9a7ca..ab731e15a600 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1934,18 +1934,20 @@ static __latent_entropy struct task_struct *copy_process(
1934 goto bad_fork_cancel_cgroup; 1934 goto bad_fork_cancel_cgroup;
1935 } 1935 }
1936 1936
1937 /* 1937 if (!(clone_flags & CLONE_THREAD)) {
1938 * Process group and session signals need to be delivered to just the 1938 /*
1939 * parent before the fork or both the parent and the child after the 1939 * Process group and session signals need to be delivered to just the
1940 * fork. Restart if a signal comes in before we add the new process to 1940 * parent before the fork or both the parent and the child after the
1941 * it's process group. 1941 * fork. Restart if a signal comes in before we add the new process to
1942 * A fatal signal pending means that current will exit, so the new 1942 * it's process group.
1943 * thread can't slip out of an OOM kill (or normal SIGKILL). 1943 * A fatal signal pending means that current will exit, so the new
1944 */ 1944 * thread can't slip out of an OOM kill (or normal SIGKILL).
1945 recalc_sigpending(); 1945 */
1946 if (signal_pending(current)) { 1946 recalc_sigpending();
1947 retval = -ERESTARTNOINTR; 1947 if (signal_pending(current)) {
1948 goto bad_fork_cancel_cgroup; 1948 retval = -ERESTARTNOINTR;
1949 goto bad_fork_cancel_cgroup;
1950 }
1949 } 1951 }
1950 1952
1951 1953
@@ -1982,6 +1984,7 @@ static __latent_entropy struct task_struct *copy_process(
1982 current->signal->nr_threads++; 1984 current->signal->nr_threads++;
1983 atomic_inc(&current->signal->live); 1985 atomic_inc(&current->signal->live);
1984 atomic_inc(&current->signal->sigcnt); 1986 atomic_inc(&current->signal->sigcnt);
1987 task_join_group_stop(p);
1985 list_add_tail_rcu(&p->thread_group, 1988 list_add_tail_rcu(&p->thread_group,
1986 &p->group_leader->thread_group); 1989 &p->group_leader->thread_group);
1987 list_add_tail_rcu(&p->thread_node, 1990 list_add_tail_rcu(&p->thread_node,
diff --git a/kernel/signal.c b/kernel/signal.c
index 1e06f1eba363..9f0eafb6d474 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -373,6 +373,20 @@ static bool task_participate_group_stop(struct task_struct *task)
373 return false; 373 return false;
374} 374}
375 375
376void task_join_group_stop(struct task_struct *task)
377{
378 /* Have the new thread join an on-going signal group stop */
379 unsigned long jobctl = current->jobctl;
380 if (jobctl & JOBCTL_STOP_PENDING) {
381 struct signal_struct *sig = current->signal;
382 unsigned long signr = jobctl & JOBCTL_STOP_SIGMASK;
383 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
384 if (task_set_jobctl_pending(task, signr | gstop)) {
385 sig->group_stop_count++;
386 }
387 }
388}
389
376/* 390/*
377 * allocate a new signal queue record 391 * allocate a new signal queue record
378 * - this may be called without locks if and only if t == current, otherwise an 392 * - this may be called without locks if and only if t == current, otherwise an