Diffstat (limited to 'arch/x86/mm/fault.c')
-rw-r--r--  arch/x86/mm/fault.c | 88 ++++++++++++------------------
1 file changed, 34 insertions(+), 54 deletions(-)
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index b0ff378650a9..3109ba6c6ede 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -30,26 +30,6 @@
 #include <asm/trace/exceptions.h>
 
 /*
- * Page fault error code bits:
- *
- * bit 0 ==	 0: no page found	1: protection fault
- * bit 1 ==	 0: read access		1: write access
- * bit 2 ==	 0: kernel-mode access	1: user-mode access
- * bit 3 ==				1: use of reserved bit detected
- * bit 4 ==				1: fault was an instruction fetch
- * bit 5 ==				1: protection keys block access
- */
-enum x86_pf_error_code {
-
-	PF_PROT		=		1 << 0,
-	PF_WRITE	=		1 << 1,
-	PF_USER		=		1 << 2,
-	PF_RSVD		=		1 << 3,
-	PF_INSTR	=		1 << 4,
-	PF_PK		=		1 << 5,
-};
-
-/*
  * Returns 0 if mmiotrace is disabled, or if the fault is not
  * handled by mmiotrace:
  */
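
The X86_PF_* names used throughout the rest of this patch are not defined in fault.c; the deleted enum has presumably been relocated to a shared x86 header, which falls outside this diffstat (it is limited to fault.c). A minimal standalone sketch of equivalent definitions, assuming the bit positions are unchanged from the deleted comment block:

	/* Hypothetical sketch only; the real header contents are not
	 * shown in this diff. Bit positions follow the comment block
	 * deleted above. */
	#define X86_PF_PROT	(1UL << 0)	/* 0: no page found      1: protection fault */
	#define X86_PF_WRITE	(1UL << 1)	/* 0: read access        1: write access */
	#define X86_PF_USER	(1UL << 2)	/* 0: kernel-mode access 1: user-mode access */
	#define X86_PF_RSVD	(1UL << 3)	/* 1: use of reserved bit detected */
	#define X86_PF_INSTR	(1UL << 4)	/* 1: fault was an instruction fetch */
	#define X86_PF_PK	(1UL << 5)	/* 1: protection keys block access */
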
@@ -150,7 +130,7 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
 	 * If it was a exec (instruction fetch) fault on NX page, then
 	 * do not ignore the fault:
 	 */
-	if (error_code & PF_INSTR)
+	if (error_code & X86_PF_INSTR)
 		return 0;
 
 	instr = (void *)convert_ip_to_linear(current, regs);
@@ -180,7 +160,7 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
 	 * siginfo so userspace can discover which protection key was set
 	 * on the PTE.
 	 *
-	 * If we get here, we know that the hardware signaled a PF_PK
+	 * If we get here, we know that the hardware signaled a X86_PF_PK
 	 * fault and that there was a VMA once we got in the fault
 	 * handler. It does *not* guarantee that the VMA we find here
 	 * was the one that we faulted on.
@@ -205,7 +185,7 @@ static void fill_sig_info_pkey(int si_code, siginfo_t *info, u32 *pkey)
 	/*
 	 * force_sig_info_fault() is called from a number of
 	 * contexts, some of which have a VMA and some of which
-	 * do not. The PF_PK handing happens after we have a
+	 * do not. The X86_PF_PK handing happens after we have a
 	 * valid VMA, so we should never reach this without a
 	 * valid VMA.
 	 */
@@ -698,7 +678,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
 	if (!oops_may_print())
 		return;
 
-	if (error_code & PF_INSTR) {
+	if (error_code & X86_PF_INSTR) {
 		unsigned int level;
 		pgd_t *pgd;
 		pte_t *pte;
@@ -780,7 +760,7 @@ no_context(struct pt_regs *regs, unsigned long error_code,
 	 */
 	if (current->thread.sig_on_uaccess_err && signal) {
 		tsk->thread.trap_nr = X86_TRAP_PF;
-		tsk->thread.error_code = error_code | PF_USER;
+		tsk->thread.error_code = error_code | X86_PF_USER;
 		tsk->thread.cr2 = address;
 
 		/* XXX: hwpoison faults will set the wrong code. */
@@ -898,7 +878,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
 	struct task_struct *tsk = current;
 
 	/* User mode accesses just cause a SIGSEGV */
-	if (error_code & PF_USER) {
+	if (error_code & X86_PF_USER) {
 		/*
 		 * It's possible to have interrupts off here:
 		 */
@@ -919,7 +899,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
 	 * Instruction fetch faults in the vsyscall page might need
 	 * emulation.
 	 */
-	if (unlikely((error_code & PF_INSTR) &&
+	if (unlikely((error_code & X86_PF_INSTR) &&
 		     ((address & ~0xfff) == VSYSCALL_ADDR))) {
 		if (emulate_vsyscall(regs, address))
 			return;
@@ -932,7 +912,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
 	 * are always protection faults.
 	 */
 	if (address >= TASK_SIZE_MAX)
-		error_code |= PF_PROT;
+		error_code |= X86_PF_PROT;
 
 	if (likely(show_unhandled_signals))
 		show_signal_msg(regs, error_code, address, tsk);
@@ -993,11 +973,11 @@ static inline bool bad_area_access_from_pkeys(unsigned long error_code,
 
 	if (!boot_cpu_has(X86_FEATURE_OSPKE))
 		return false;
-	if (error_code & PF_PK)
+	if (error_code & X86_PF_PK)
 		return true;
 	/* this checks permission keys on the VMA: */
-	if (!arch_vma_access_permitted(vma, (error_code & PF_WRITE),
-				       (error_code & PF_INSTR), foreign))
+	if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE),
+				       (error_code & X86_PF_INSTR), foreign))
 		return true;
 	return false;
 }
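
For context, an X86_PF_PK fault is raised on a *present* page whose protection key denies the access, and reaches userspace as SIGSEGV with si_code SEGV_PKUERR. A hedged userspace sketch of triggering one (not part of this patch; assumes glibc 2.27+ pkey wrappers and OSPKE-capable hardware):

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <sys/mman.h>

	int main(void)
	{
		char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		int pkey = pkey_alloc(0, PKEY_DISABLE_ACCESS);

		if (p == MAP_FAILED || pkey < 0)
			return 1;	/* no OSPKE support: pkey_alloc() fails */
		pkey_mprotect(p, 4096, PROT_READ | PROT_WRITE, pkey);
		/* The page is present and writable, but the pkey forbids
		 * access, so this read faults with the protection-key bit
		 * set in the error code. */
		printf("%c\n", *p);
		return 0;
	}
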
@@ -1025,7 +1005,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
 	int code = BUS_ADRERR;
 
 	/* Kernel mode? Handle exceptions or die: */
-	if (!(error_code & PF_USER)) {
+	if (!(error_code & X86_PF_USER)) {
 		no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
 		return;
 	}
@@ -1053,14 +1033,14 @@ static noinline void
 mm_fault_error(struct pt_regs *regs, unsigned long error_code,
 	       unsigned long address, u32 *pkey, unsigned int fault)
 {
-	if (fatal_signal_pending(current) && !(error_code & PF_USER)) {
+	if (fatal_signal_pending(current) && !(error_code & X86_PF_USER)) {
 		no_context(regs, error_code, address, 0, 0);
 		return;
 	}
 
 	if (fault & VM_FAULT_OOM) {
 		/* Kernel mode? Handle exceptions or die: */
-		if (!(error_code & PF_USER)) {
+		if (!(error_code & X86_PF_USER)) {
 			no_context(regs, error_code, address,
 				   SIGSEGV, SEGV_MAPERR);
 			return;
@@ -1085,16 +1065,16 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
 
 static int spurious_fault_check(unsigned long error_code, pte_t *pte)
 {
-	if ((error_code & PF_WRITE) && !pte_write(*pte))
+	if ((error_code & X86_PF_WRITE) && !pte_write(*pte))
 		return 0;
 
-	if ((error_code & PF_INSTR) && !pte_exec(*pte))
+	if ((error_code & X86_PF_INSTR) && !pte_exec(*pte))
 		return 0;
 	/*
 	 * Note: We do not do lazy flushing on protection key
-	 * changes, so no spurious fault will ever set PF_PK.
+	 * changes, so no spurious fault will ever set X86_PF_PK.
 	 */
-	if ((error_code & PF_PK))
+	if ((error_code & X86_PF_PK))
 		return 1;
 
 	return 1;
@@ -1140,8 +1120,8 @@ spurious_fault(unsigned long error_code, unsigned long address)
 	 * change, so user accesses are not expected to cause spurious
 	 * faults.
 	 */
-	if (error_code != (PF_WRITE | PF_PROT)
-	    && error_code != (PF_INSTR | PF_PROT))
+	if (error_code != (X86_PF_WRITE | X86_PF_PROT) &&
+	    error_code != (X86_PF_INSTR | X86_PF_PROT))
 		return 0;
 
 	pgd = init_mm.pgd + pgd_index(address);
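
With the bit values from the sketch near the top, the rewritten condition admits exactly two error codes into the spurious-fault page-table walk (illustration only, not part of the patch):

	/* The only error codes spurious_fault() will examine further:
	 * a write or an instruction fetch that hit a present page. */
	unsigned long spurious_write = X86_PF_WRITE | X86_PF_PROT; /* 0x03 */
	unsigned long spurious_exec  = X86_PF_INSTR | X86_PF_PROT; /* 0x11 */
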
@@ -1201,19 +1181,19 @@ access_error(unsigned long error_code, struct vm_area_struct *vma)
 	 * always an unconditional error and can never result in
 	 * a follow-up action to resolve the fault, like a COW.
 	 */
-	if (error_code & PF_PK)
+	if (error_code & X86_PF_PK)
 		return 1;
 
 	/*
 	 * Make sure to check the VMA so that we do not perform
-	 * faults just to hit a PF_PK as soon as we fill in a
+	 * faults just to hit a X86_PF_PK as soon as we fill in a
 	 * page.
 	 */
-	if (!arch_vma_access_permitted(vma, (error_code & PF_WRITE),
-				       (error_code & PF_INSTR), foreign))
+	if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE),
+				       (error_code & X86_PF_INSTR), foreign))
 		return 1;
 
-	if (error_code & PF_WRITE) {
+	if (error_code & X86_PF_WRITE) {
 		/* write, present and write, not present: */
 		if (unlikely(!(vma->vm_flags & VM_WRITE)))
 			return 1;
@@ -1221,7 +1201,7 @@ access_error(unsigned long error_code, struct vm_area_struct *vma)
 	}
 
 	/* read, present: */
-	if (unlikely(error_code & PF_PROT))
+	if (unlikely(error_code & X86_PF_PROT))
 		return 1;
 
 	/* read, not present: */
@@ -1244,7 +1224,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
 	if (!static_cpu_has(X86_FEATURE_SMAP))
 		return false;
 
-	if (error_code & PF_USER)
+	if (error_code & X86_PF_USER)
 		return false;
 
 	if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
@@ -1297,7 +1277,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
 	 * protection error (error_code & 9) == 0.
 	 */
 	if (unlikely(fault_in_kernel_space(address))) {
-		if (!(error_code & (PF_RSVD | PF_USER | PF_PROT))) {
+		if (!(error_code & (X86_PF_RSVD | X86_PF_USER | X86_PF_PROT))) {
 			if (vmalloc_fault(address) >= 0)
 				return;
 
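
The context comment kept above still reads "(error_code & 9) == 0"; with the renamed constants, that magic 9 is X86_PF_PROT | X86_PF_RSVD (0x1 | 0x8). A standalone restatement of the test, assuming the bit definitions sketched earlier:

	#include <stdbool.h>

	#define X86_PF_PROT	(1UL << 0)
	#define X86_PF_USER	(1UL << 2)
	#define X86_PF_RSVD	(1UL << 3)

	/* A kernel-address fault can be a lazily-synced vmalloc mapping
	 * only if the page was not present, no reserved bit was violated,
	 * and the access came from kernel mode: none of these three bits
	 * may be set. */
	static bool may_be_vmalloc_fault(unsigned long error_code)
	{
		return (error_code &
			(X86_PF_RSVD | X86_PF_USER | X86_PF_PROT)) == 0;
	}
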
@@ -1325,7 +1305,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
 	if (unlikely(kprobes_fault(regs)))
 		return;
 
-	if (unlikely(error_code & PF_RSVD))
+	if (unlikely(error_code & X86_PF_RSVD))
 		pgtable_bad(regs, error_code, address);
 
 	if (unlikely(smap_violation(error_code, regs))) {
@@ -1351,7 +1331,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
 	 */
 	if (user_mode(regs)) {
 		local_irq_enable();
-		error_code |= PF_USER;
+		error_code |= X86_PF_USER;
 		flags |= FAULT_FLAG_USER;
 	} else {
 		if (regs->flags & X86_EFLAGS_IF)
@@ -1360,9 +1340,9 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
-	if (error_code & PF_WRITE)
+	if (error_code & X86_PF_WRITE)
 		flags |= FAULT_FLAG_WRITE;
-	if (error_code & PF_INSTR)
+	if (error_code & X86_PF_INSTR)
 		flags |= FAULT_FLAG_INSTRUCTION;
 
 	/*
@@ -1382,7 +1362,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
 	 * space check, thus avoiding the deadlock:
 	 */
 	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
-		if ((error_code & PF_USER) == 0 &&
+		if (!(error_code & X86_PF_USER) &&
 		    !search_exception_tables(regs->ip)) {
 			bad_area_nosemaphore(regs, error_code, address, NULL);
 			return;
@@ -1409,7 +1389,7 @@ retry:
 		bad_area(regs, error_code, address);
 		return;
 	}
-	if (error_code & PF_USER) {
+	if (error_code & X86_PF_USER) {
 		/*
 		 * Accessing the stack below %sp is always a bug.
 		 * The large cushion allows instructions like enter