Diffstat (limited to 'arch/x86/kernel/alternative.c')
 arch/x86/kernel/alternative.c | 40 ++++++++++++++++++++++++------------
 1 file changed, 28 insertions(+), 12 deletions(-)
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index a84ac7b570e6..2d903b760ddb 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -5,6 +5,7 @@
 #include <linux/kprobes.h>
 #include <linux/mm.h>
 #include <linux/vmalloc.h>
+#include <linux/memory.h>
 #include <asm/alternative.h>
 #include <asm/sections.h>
 #include <asm/pgtable.h>
@@ -12,7 +13,9 @@
 #include <asm/nmi.h>
 #include <asm/vsyscall.h>
 #include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
 #include <asm/io.h>
+#include <asm/fixmap.h>
 
 #define MAX_PATCH_LEN (255-1)
 
@@ -226,6 +229,7 @@ static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
 {
 	u8 **ptr;
 
+	mutex_lock(&text_mutex);
 	for (ptr = start; ptr < end; ptr++) {
 		if (*ptr < text)
 			continue;
@@ -234,6 +238,7 @@ static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
 		/* turn DS segment override prefix into lock prefix */
 		text_poke(*ptr, ((unsigned char []){0xf0}), 1);
 	};
+	mutex_unlock(&text_mutex);
 }
 
 static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
@@ -243,6 +248,7 @@ static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end
 	if (noreplace_smp)
 		return;
 
+	mutex_lock(&text_mutex);
 	for (ptr = start; ptr < end; ptr++) {
 		if (*ptr < text)
 			continue;
@@ -251,6 +257,7 @@ static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end
 		/* turn lock prefix into DS segment override prefix */
 		text_poke(*ptr, ((unsigned char []){0x3E}), 1);
 	};
+	mutex_unlock(&text_mutex);
 }
 
 struct smp_alt_module {
@@ -414,9 +421,17 @@ void __init alternative_instructions(void)
 	   that might execute the to be patched code.
 	   Other CPUs are not running. */
 	stop_nmi();
-#ifdef CONFIG_X86_MCE
-	stop_mce();
-#endif
+
+	/*
+	 * Don't stop machine check exceptions while patching.
+	 * MCEs only happen when something got corrupted and in this
+	 * case we must do something about the corruption.
+	 * Ignoring it is worse than an unlikely patching race.
+	 * Also machine checks tend to be broadcast and if one CPU
+	 * goes into machine check the others follow quickly, so we don't
+	 * expect a machine check to cause undue problems during code
+	 * patching.
+	 */
 
 	apply_alternatives(__alt_instructions, __alt_instructions_end);
 
@@ -456,9 +471,6 @@ void __init alternative_instructions(void)
 			    (unsigned long)__smp_locks_end);
 
 	restart_nmi();
-#ifdef CONFIG_X86_MCE
-	restart_mce();
-#endif
 }
 
 /**
@@ -495,12 +507,13 @@ void *text_poke_early(void *addr, const void *opcode, size_t len)
  * It means the size must be writable atomically and the address must be aligned
  * in a way that permits an atomic write. It also makes sure we fit on a single
  * page.
+ *
+ * Note: Must be called under text_mutex.
  */
 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
 {
 	unsigned long flags;
 	char *vaddr;
-	int nr_pages = 2;
 	struct page *pages[2];
 	int i;
 
@@ -513,14 +526,17 @@ void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
 		pages[1] = virt_to_page(addr + PAGE_SIZE);
 	}
 	BUG_ON(!pages[0]);
-	if (!pages[1])
-		nr_pages = 1;
-	vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
-	BUG_ON(!vaddr);
+	set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
+	if (pages[1])
+		set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
+	vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
 	local_irq_save(flags);
 	memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
 	local_irq_restore(flags);
-	vunmap(vaddr);
+	clear_fixmap(FIX_TEXT_POKE0);
+	if (pages[1])
+		clear_fixmap(FIX_TEXT_POKE1);
+	local_flush_tlb();
 	sync_core();
 	/* Could also do a CLFLUSH here to speed up CPU recovery; but
 	   that causes hangs on some VIA CPUs. */
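
For illustration (not part of the diff above): the new comment states that text_poke() must be called under text_mutex, and the alternatives_smp_lock()/alternatives_smp_unlock() hunks show the intended pattern of taking the mutex around the pokes. Below is a minimal sketch of a hypothetical caller following that rule; example_patch_byte() is illustrative only and assumes text_mutex is the declaration <linux/memory.h> provides and that text_poke() is declared via <asm/alternative.h>.

#include <linux/memory.h>	/* text_mutex */
#include <linux/mutex.h>
#include <asm/alternative.h>	/* text_poke() */

/* Hypothetical example, not kernel code. */
static void example_patch_byte(void *addr, unsigned char val)
{
	/* All kernel text patching is serialized on text_mutex. */
	mutex_lock(&text_mutex);
	/* A single byte is atomically writable, so this satisfies
	 * text_poke()'s size/alignment requirement. */
	text_poke(addr, &val, 1);
	mutex_unlock(&text_mutex);
}

Replacing vmap()/vunmap() with the fixed FIX_TEXT_POKE0/FIX_TEXT_POKE1 slots presumably avoids the allocation and lazy-unmap overhead of vmap(); because the slots sit at fixed virtual addresses, a local_flush_tlb() after clear_fixmap() suffices, and text_mutex is also what keeps concurrent pokers from colliding on the shared slots.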