path: root/arch/x86/mm/fault.c
author     Linus Torvalds <torvalds@linux-foundation.org>  2018-10-23 08:08:53 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2018-10-23 08:08:53 -0400
commit     0200fbdd431519d730b5d399a12840ec832b27cc (patch)
tree       2b58f9e24b61b00e0550f106c95bfabc3b52cfdd /arch/x86/mm/fault.c
parent     de3fbb2aa802a267dee2213ae7d5a1e19eb4294a (diff)
parent     01a14bda11add9dcd4a59200f13834d634559935 (diff)
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking and misc x86 updates from Ingo Molnar:
 "Lots of changes in this cycle - in part because locking/core attracted
  a number of related x86 low level work which was easier to handle in a
  single tree:

   - Linux Kernel Memory Consistency Model updates (Alan Stern, Paul E.
     McKenney, Andrea Parri)

   - lockdep scalability improvements and micro-optimizations (Waiman Long)

   - rwsem improvements (Waiman Long)

   - spinlock micro-optimization (Matthew Wilcox)

   - qspinlocks: Provide a liveness guarantee (more fairness) on x86.
     (Peter Zijlstra)

   - Add support for relative references in jump tables on arm64, x86
     and s390 to optimize jump labels (Ard Biesheuvel, Heiko Carstens)

   - Be a lot less permissive on weird (kernel address) uaccess faults
     on x86: BUG() when uaccess helpers fault on kernel addresses (Jann Horn)

   - macrofy x86 asm statements to un-confuse the GCC inliner. (Nadav Amit)

   - ... and a handful of other smaller changes as well"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (57 commits)
  locking/lockdep: Make global debug_locks* variables read-mostly
  locking/lockdep: Fix debug_locks off performance problem
  locking/pvqspinlock: Extend node size when pvqspinlock is configured
  locking/qspinlock_stat: Count instances of nested lock slowpaths
  locking/qspinlock, x86: Provide liveness guarantee
  x86/asm: 'Simplify' GEN_*_RMWcc() macros
  locking/qspinlock: Rework some comments
  locking/qspinlock: Re-order code
  locking/lockdep: Remove duplicated 'lock_class_ops' percpu array
  x86/defconfig: Enable CONFIG_USB_XHCI_HCD=y
  futex: Replace spin_is_locked() with lockdep
  locking/lockdep: Make class->ops a percpu counter and move it under CONFIG_DEBUG_LOCKDEP=y
  x86/jump-labels: Macrofy inline assembly code to work around GCC inlining bugs
  x86/cpufeature: Macrofy inline assembly code to work around GCC inlining bugs
  x86/extable: Macrofy inline assembly code to work around GCC inlining bugs
  x86/paravirt: Work around GCC inlining bugs when compiling paravirt ops
  x86/bug: Macrofy the BUG table section handling, to work around GCC inlining bugs
  x86/alternatives: Macrofy lock prefixes to work around GCC inlining bugs
  x86/refcount: Work around GCC inlining bug
  x86/objtool: Use asm macros to work around GCC inlining bugs
  ...
Diffstat (limited to 'arch/x86/mm/fault.c')
-rw-r--r--  arch/x86/mm/fault.c  26
1 file changed, 14 insertions(+), 12 deletions(-)
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index a5b9ddb0f1fe..0d45f6debb3a 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -46,17 +46,19 @@ kmmio_fault(struct pt_regs *regs, unsigned long addr)
 
 static nokprobe_inline int kprobes_fault(struct pt_regs *regs)
 {
-	int ret = 0;
-
-	/* kprobe_running() needs smp_processor_id() */
-	if (kprobes_built_in() && !user_mode(regs)) {
-		preempt_disable();
-		if (kprobe_running() && kprobe_fault_handler(regs, 14))
-			ret = 1;
-		preempt_enable();
-	}
-
-	return ret;
+	if (!kprobes_built_in())
+		return 0;
+	if (user_mode(regs))
+		return 0;
+	/*
+	 * To be potentially processing a kprobe fault and to be allowed to call
+	 * kprobe_running(), we have to be non-preemptible.
+	 */
+	if (preemptible())
+		return 0;
+	if (!kprobe_running())
+		return 0;
+	return kprobe_fault_handler(regs, X86_TRAP_PF);
 }
 
 /*
@@ -711,7 +713,7 @@ no_context(struct pt_regs *regs, unsigned long error_code,
 	int sig;
 
 	/* Are we prepared to handle this kernel fault? */
-	if (fixup_exception(regs, X86_TRAP_PF)) {
+	if (fixup_exception(regs, X86_TRAP_PF, error_code, address)) {
 		/*
 		 * Any interrupt that takes a fault gets the fixup. This makes
 		 * the below recursive fault logic only apply to a faults from
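
For readability, the reworked kprobes_fault() from the first hunk is repeated below as a plain
listing, assembled from the '+' side of that hunk (nothing beyond what the diff already shows).
The rework replaces the old preempt_disable()/preempt_enable() bracket around kprobe_running()
with early returns, including an explicit preemptible() check, and spells the magic trap number
14 as X86_TRAP_PF:

static nokprobe_inline int kprobes_fault(struct pt_regs *regs)
{
	if (!kprobes_built_in())
		return 0;
	if (user_mode(regs))
		return 0;
	/*
	 * To be potentially processing a kprobe fault and to be allowed to call
	 * kprobe_running(), we have to be non-preemptible.
	 */
	if (preemptible())
		return 0;
	if (!kprobe_running())
		return 0;
	return kprobe_fault_handler(regs, X86_TRAP_PF);
}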