author     Will Deacon <will.deacon@arm.com>        2018-06-22 06:45:07 -0400
committer  Thomas Gleixner <tglx@linutronix.de>     2018-06-22 13:04:22 -0400
commit     784e0300fe9fe4aa81bd7df9d59e138f56bb605b (patch)
tree       442642dae3c8e1b50ab68460c2e7662684d9e194 /include/linux/sched.h
parent     9a789fcfe8605417f7a1a970355f5efa4fe88c64 (diff)
rseq: Avoid infinite recursion when delivering SIGSEGV
When delivering a signal to a task that is using rseq, we call into
__rseq_handle_notify_resume() so that the registers pushed in the sigframe
are updated to reflect the state of the restartable sequence (for example,
ensuring that the signal returns to the abort handler if necessary).

However, if the rseq management fails due to an unrecoverable fault when
accessing userspace or certain combinations of RSEQ_CS_* flags, then we
will attempt to deliver a SIGSEGV. This has the potential for infinite
recursion if the rseq code continuously fails on signal delivery.

Avoid this problem by using force_sigsegv() instead of force_sig(), which
is explicitly designed to reset the SEGV handler to SIG_DFL in the case of
a recursive fault. In doing so, remove rseq_signal_deliver() from the
internal rseq API and have an optional struct ksignal * parameter to
rseq_handle_notify_resume() instead.

Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: peterz@infradead.org
Cc: paulmck@linux.vnet.ibm.com
Cc: boqun.feng@gmail.com
Link: https://lkml.kernel.org/r/1529664307-983-1-git-send-email-will.deacon@arm.com
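The force_sigsegv() change itself lands in kernel/rseq.c, which falls outside
the diffstat filter below. A minimal sketch of the resulting error path,
assuming the 4.18-era signatures force_sig(int, struct task_struct *) and
force_sigsegv(int, struct task_struct *); the fixup helpers and surrounding
code are illustrative, not quoted from this commit:

/*
 * Sketch only: error path of __rseq_handle_notify_resume() after this
 * change.  rseq_ip_fixup()/rseq_update_cpu_id() and the exact surrounding
 * code are assumed for illustration.
 */
void __rseq_handle_notify_resume(struct ksignal *ksig, struct pt_regs *regs)
{
	struct task_struct *t = current;
	int ret, sig;

	if (unlikely(t->flags & PF_EXITING))
		return;
	ret = rseq_ip_fixup(regs);
	if (unlikely(ret < 0))
		goto error;
	if (unlikely(rseq_update_cpu_id(t)))
		goto error;
	return;

error:
	/*
	 * Unlike force_sig(SIGSEGV, t), force_sigsegv() resets the handler
	 * to SIG_DFL on a recursive fault, so a failure during signal
	 * delivery cannot loop forever.
	 */
	sig = ksig ? ksig->sig : 0;
	force_sigsegv(sig, t);
}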
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--  include/linux/sched.h  18
1 file changed, 11 insertions, 7 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index c1882643d455..9256118bd40c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1799,20 +1799,22 @@ static inline void rseq_set_notify_resume(struct task_struct *t)
 		set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
 }
 
-void __rseq_handle_notify_resume(struct pt_regs *regs);
+void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);
 
-static inline void rseq_handle_notify_resume(struct pt_regs *regs)
+static inline void rseq_handle_notify_resume(struct ksignal *ksig,
+					     struct pt_regs *regs)
 {
 	if (current->rseq)
-		__rseq_handle_notify_resume(regs);
+		__rseq_handle_notify_resume(ksig, regs);
 }
 
-static inline void rseq_signal_deliver(struct pt_regs *regs)
+static inline void rseq_signal_deliver(struct ksignal *ksig,
+				       struct pt_regs *regs)
 {
 	preempt_disable();
 	__set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
 	preempt_enable();
-	rseq_handle_notify_resume(regs);
+	rseq_handle_notify_resume(ksig, regs);
 }
 
 /* rseq_preempt() requires preemption to be disabled. */
@@ -1861,10 +1863,12 @@ static inline void rseq_execve(struct task_struct *t)
 static inline void rseq_set_notify_resume(struct task_struct *t)
 {
 }
-static inline void rseq_handle_notify_resume(struct pt_regs *regs)
+static inline void rseq_handle_notify_resume(struct ksignal *ksig,
+					     struct pt_regs *regs)
 {
 }
-static inline void rseq_signal_deliver(struct pt_regs *regs)
+static inline void rseq_signal_deliver(struct ksignal *ksig,
+				       struct pt_regs *regs)
 {
 }
 static inline void rseq_preempt(struct task_struct *t)
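For context, a hedged sketch of how an architecture's signal-delivery path is
expected to call the updated helper; the handle_signal()/setup_rt_frame()
naming follows common arch signal code and is illustrative rather than taken
from this diff:

/* Sketch only: caller side in an arch signal-delivery routine. */
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	int ret;

	/*
	 * Fix up the pre-signal frame for rseq before building the
	 * sigframe; passing ksig lets a failure raise SIGSEGV via
	 * force_sigsegv() without recursing.
	 */
	rseq_signal_deliver(ksig, regs);

	ret = setup_rt_frame(ksig, sigmask_to_save(), regs);
	signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP));
}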