aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorAndy Lutomirski <luto@kernel.org>2015-07-15 13:29:37 -0400
committerIngo Molnar <mingo@kernel.org>2015-07-17 06:50:12 -0400
commita27507ca2d796cfa8d907de31ad730359c8a6d06 (patch)
tree5d43b8cd95400c02ee2fef4f99dc8c1fb983a950
parent0b22930ebad563ae97ff3f8d7b9f12060b4c6e6b (diff)
x86/nmi/64: Reorder nested NMI checks
Check the repeat_nmi .. end_repeat_nmi special case first. The next patch will rework the RSP check and, as a side effect, the RSP check will no longer detect repeat_nmi .. end_repeat_nmi, so we'll need this ordering of the checks. Note: this is more subtle than it appears. The check for repeat_nmi .. end_repeat_nmi jumps straight out of the NMI code instead of adjusting the "iret" frame to force a repeat. This is necessary, because the code between repeat_nmi and end_repeat_nmi sets "NMI executing" and then writes to the "iret" frame itself. If a nested NMI comes in and modifies the "iret" frame while repeat_nmi is also modifying it, we'll end up with garbage. The old code got this right, as does the new code, but the new code is a bit more explicit. If we were to move the check right after the "NMI executing" check, then we'd get it wrong and have random crashes. ( Because the "NMI executing" check would jump to the code that would modify the "iret" frame without checking if the interrupted NMI was currently modifying it. ) Signed-off-by: Andy Lutomirski <luto@kernel.org> Reviewed-by: Steven Rostedt <rostedt@goodmis.org> Cc: Borislav Petkov <bp@suse.de> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: stable@vger.kernel.org Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--arch/x86/entry/entry_64.S34
1 files changed, 18 insertions, 16 deletions
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index f54d63a60a3b..5c4ab384b84f 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -1361,7 +1361,24 @@ ENTRY(nmi)
1361 /* 1361 /*
1362 * Determine whether we're a nested NMI. 1362 * Determine whether we're a nested NMI.
1363 * 1363 *
1364 * First check "NMI executing". If it's set, then we're nested. 1364 * If we interrupted kernel code between repeat_nmi and
1365 * end_repeat_nmi, then we are a nested NMI. We must not
1366 * modify the "iret" frame because it's being written by
1367 * the outer NMI. That's okay; the outer NMI handler is
1368 * about to call do_nmi anyway, so we can just
1369 * resume the outer NMI.
1370 */
1371
1372 movq $repeat_nmi, %rdx
1373 cmpq 8(%rsp), %rdx
1374 ja 1f
1375 movq $end_repeat_nmi, %rdx
1376 cmpq 8(%rsp), %rdx
1377 ja nested_nmi_out
1378 1:
1379
1380 /*
1381 * Now check "NMI executing". If it's set, then we're nested.
1365 * This will not detect if we interrupted an outer NMI just 1382 * This will not detect if we interrupted an outer NMI just
1366 * before IRET. 1383 * before IRET.
1367 */ 1384 */
@@ -1387,21 +1404,6 @@ ENTRY(nmi)
1387 1404
1388nested_nmi: 1405nested_nmi:
1389 /* 1406 /*
1390 * If we interrupted an NMI that is between repeat_nmi and
1391 * end_repeat_nmi, then we must not modify the "iret" frame
1392 * because it's being written by the outer NMI. That's okay;
1393 * the outer NMI handler is about to call do_nmi anyway,
1394 * so we can just resume the outer NMI.
1395 */
1396 movq $repeat_nmi, %rdx
1397 cmpq 8(%rsp), %rdx
1398 ja 1f
1399 movq $end_repeat_nmi, %rdx
1400 cmpq 8(%rsp), %rdx
1401 ja nested_nmi_out
1402
1403 1:
1404 /*
1405 * Modify the "iret" frame to point to repeat_nmi, forcing another 1407 * Modify the "iret" frame to point to repeat_nmi, forcing another
1406 * iteration of NMI handling. 1408 * iteration of NMI handling.
1407 */ 1409 */