author    Andy Lutomirski <luto@kernel.org>    2018-11-21 18:11:22 -0500
committer Ingo Molnar <mingo@kernel.org>       2018-11-22 03:22:59 -0500
commit    0ed32f1aa66ee758e6c8164f549f7ff9d399a20e (patch)
tree      502e38ef2ac0c53f0a9a3d78b5d7c35b3b377f57
parent    1ad33f5aec20f53785dbad44c6fb3b204aefd921 (diff)
x86/fault: Remove sw_error_code
All of the fault handling code now correctly checks user_mode(regs) as
needed, and nothing depends on the X86_PF_USER bit being munged.  Get
rid of sw_error_code and use hw_error_code everywhere.

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Yu-cheng Yu <yu-cheng.yu@intel.com>
Link: http://lkml.kernel.org/r/078f5b8ae6e8c79ff8ee7345b5c476c45003e5ac.1542841400.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
 arch/x86/mm/fault.c | 50 +++++++++++---------------------------------------
 1 file changed, 11 insertions(+), 39 deletions(-)
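In short: the hardware error code's X86_PF_USER bit says whether the
*access* itself was a user-mode access, while user_mode(regs) says
whether the CPU was executing user code when it faulted.  The two can
disagree for implicit supervisor accesses performed on behalf of user
code (e.g. the CPU touching a descriptor table), which is the case the
removed munging papered over.  The following is a minimal standalone
userspace model of that distinction, not kernel code: the constants and
the user_mode() check mirror their arch/x86 namesakes, but the harness
around them is purely illustrative.

/*
 * Userspace model of the logic this patch simplifies -- not kernel
 * code.  Constant values match the x86 page-fault error code bits;
 * everything else is a made-up harness.
 */
#include <stdbool.h>
#include <stdio.h>

#define X86_PF_WRITE (1UL << 1)   /* hardware: fault was a write */
#define X86_PF_USER  (1UL << 2)   /* hardware: fault came from CPL 3 */
#define USER_RPL     3

struct pt_regs { unsigned long cs; };

/* user_mode(): was the CPU running user code when it faulted?
 * (Simplified: the real helper also copes with VM86 on 32-bit.) */
static bool user_mode(const struct pt_regs *regs)
{
	return (regs->cs & 3) == USER_RPL;
}

/*
 * Before the patch, sw_error_code = hw_error_code and X86_PF_USER was
 * OR-ed in for implicit supervisor accesses from user mode.  After the
 * patch, "did the task run in user mode?" comes from user_mode(regs),
 * and per-access questions (write? instruction fetch?) read the raw
 * hw_error_code.
 */
static void classify(const struct pt_regs *regs, unsigned long hw_error_code)
{
	bool fault_flag_user = user_mode(regs);       /* regs, not munged bits */
	bool is_write        = hw_error_code & X86_PF_WRITE;

	printf("user task: %d, write access: %d, hw X86_PF_USER: %d\n",
	       fault_flag_user, is_write,
	       !!(hw_error_code & X86_PF_USER));
}

int main(void)
{
	struct pt_regs user_regs = { .cs = 0x33 };    /* typical user CS, CPL 3 */

	/* Ordinary user write fault: hardware sets X86_PF_USER itself. */
	classify(&user_regs, X86_PF_USER | X86_PF_WRITE);

	/* Implicit kernel-mode access from user mode: X86_PF_USER is
	 * clear even though user_mode(regs) is true -- the case
	 * sw_error_code used to handle by munging the bit. */
	classify(&user_regs, X86_PF_WRITE);
	return 0;
}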
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index b898a38093a3..82881bc5feef 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1217,7 +1217,6 @@ void do_user_addr_fault(struct pt_regs *regs,
 			unsigned long hw_error_code,
 			unsigned long address)
 {
-	unsigned long sw_error_code;
 	struct vm_area_struct *vma;
 	struct task_struct *tsk;
 	struct mm_struct *mm;
@@ -1263,13 +1262,6 @@ void do_user_addr_fault(struct pt_regs *regs,
 	}
 
 	/*
-	 * hw_error_code is literally the "page fault error code" passed to
-	 * the kernel directly from the hardware.  But, we will shortly be
-	 * modifying it in software, so give it a new name.
-	 */
-	sw_error_code = hw_error_code;
-
-	/*
 	 * It's safe to allow irq's after cr2 has been saved and the
 	 * vmalloc fault has been handled.
 	 *
@@ -1278,26 +1270,6 @@ void do_user_addr_fault(struct pt_regs *regs,
 	 */
 	if (user_mode(regs)) {
 		local_irq_enable();
-		/*
-		 * Up to this point, X86_PF_USER set in hw_error_code
-		 * indicated a user-mode access.  But, after this,
-		 * X86_PF_USER in sw_error_code will indicate either
-		 * that, *or* an implicit kernel(supervisor)-mode access
-		 * which originated from user mode.
-		 */
-		if (!(hw_error_code & X86_PF_USER)) {
-			/*
-			 * The CPU was in user mode, but the CPU says
-			 * the fault was not a user-mode access.
-			 * Must be an implicit kernel-mode access,
-			 * which we do not expect to happen in the
-			 * user address space.
-			 */
-			pr_warn_once("kernel-mode error from user-mode: %lx\n",
-				     hw_error_code);
-
-			sw_error_code |= X86_PF_USER;
-		}
 		flags |= FAULT_FLAG_USER;
 	} else {
 		if (regs->flags & X86_EFLAGS_IF)
@@ -1306,9 +1278,9 @@ void do_user_addr_fault(struct pt_regs *regs,
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
-	if (sw_error_code & X86_PF_WRITE)
+	if (hw_error_code & X86_PF_WRITE)
 		flags |= FAULT_FLAG_WRITE;
-	if (sw_error_code & X86_PF_INSTR)
+	if (hw_error_code & X86_PF_INSTR)
 		flags |= FAULT_FLAG_INSTRUCTION;
 
 #ifdef CONFIG_X86_64
@@ -1321,7 +1293,7 @@ void do_user_addr_fault(struct pt_regs *regs,
 	 * The vsyscall page does not have a "real" VMA, so do this
 	 * emulation before we go searching for VMAs.
 	 */
-	if ((sw_error_code & X86_PF_INSTR) && is_vsyscall_vaddr(address)) {
+	if ((hw_error_code & X86_PF_INSTR) && is_vsyscall_vaddr(address)) {
 		if (emulate_vsyscall(regs, address))
 			return;
 	}
@@ -1345,7 +1317,7 @@ void do_user_addr_fault(struct pt_regs *regs,
 		 * Fault from code in kernel from
 		 * which we do not expect faults.
 		 */
-		bad_area_nosemaphore(regs, sw_error_code, address);
+		bad_area_nosemaphore(regs, hw_error_code, address);
 		return;
 	}
 retry:
@@ -1361,17 +1333,17 @@ retry:
 
 	vma = find_vma(mm, address);
 	if (unlikely(!vma)) {
-		bad_area(regs, sw_error_code, address);
+		bad_area(regs, hw_error_code, address);
 		return;
 	}
 	if (likely(vma->vm_start <= address))
 		goto good_area;
 	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
-		bad_area(regs, sw_error_code, address);
+		bad_area(regs, hw_error_code, address);
 		return;
 	}
 	if (unlikely(expand_stack(vma, address))) {
-		bad_area(regs, sw_error_code, address);
+		bad_area(regs, hw_error_code, address);
 		return;
 	}
 
@@ -1380,8 +1352,8 @@ retry:
 	 * we can handle it..
 	 */
 good_area:
-	if (unlikely(access_error(sw_error_code, vma))) {
-		bad_area_access_error(regs, sw_error_code, address, vma);
+	if (unlikely(access_error(hw_error_code, vma))) {
+		bad_area_access_error(regs, hw_error_code, address, vma);
 		return;
 	}
 
@@ -1420,13 +1392,13 @@ good_area:
 			return;
 
 		/* Not returning to user mode? Handle exceptions or die: */
-		no_context(regs, sw_error_code, address, SIGBUS, BUS_ADRERR);
+		no_context(regs, hw_error_code, address, SIGBUS, BUS_ADRERR);
 		return;
 	}
 
 	up_read(&mm->mmap_sem);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
-		mm_fault_error(regs, sw_error_code, address, fault);
+		mm_fault_error(regs, hw_error_code, address, fault);
 		return;
 	}
 