about summary refs log tree commit diff stats
path: root/arch
diff options
context:
space:
mode:
author    Ben Hutchings <ben@decadent.org.uk>  2009-09-09 21:53:50 -0400
committer H. Peter Anvin <hpa@zytor.com>       2009-09-10 19:50:19 -0400
commit    5367b6887e7d8c870a5da7d9b8c6e9c207684e43 (patch)
tree      eab362078a7964850b00f8d781aa9c9f5ff01df7 /arch
parent    b19ae3999891cad21a3995c34d313dda5df014e2 (diff)
x86: Fix code patching for paravirt-alternatives on 486
As reported in <http://bugs.debian.org/511703> and <http://bugs.debian.org/515982>, kernels with paravirt-alternatives enabled crash in text_poke_early() on at least some 486-class processors.

The problem is that text_poke_early() itself uses inline functions affected by paravirt-alternatives and so will modify instructions that have already been prefetched. Pentium and later processors will invalidate the prefetched instructions in this case, but 486-class processors do not.

Change sync_core() to limit prefetching on 486-class (and 386-class) processors, and move the call to sync_core() above the call to the modifiable local_irq_restore().

Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
LKML-Reference: <1252547631.3423.134.camel@localhost>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/include/asm/processor.h | 16 ++++++++++++----
-rw-r--r--  arch/x86/kernel/alternative.c    |  2 +-
2 files changed, 14 insertions(+), 4 deletions(-)
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index c7768269b1cf..2db56c57a281 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -703,13 +703,23 @@ static inline void cpu_relax(void)
 	rep_nop();
 }
 
-/* Stop speculative execution: */
+/* Stop speculative execution and prefetching of modified code. */
 static inline void sync_core(void)
 {
 	int tmp;
 
-	asm volatile("cpuid" : "=a" (tmp) : "0" (1)
-		     : "ebx", "ecx", "edx", "memory");
+#if defined(CONFIG_M386) || defined(CONFIG_M486)
+	if (boot_cpu_data.x86 < 5)
+		/* There is no speculative execution.
+		 * jmp is a barrier to prefetching. */
+		asm volatile("jmp 1f\n1:\n" ::: "memory");
+	else
+#endif
+		/* cpuid is a barrier to speculative execution.
+		 * Prefetched instructions are automatically
+		 * invalidated when modified. */
+		asm volatile("cpuid" : "=a" (tmp) : "0" (1)
+			     : "ebx", "ecx", "edx", "memory");
 }
 
 static inline void __monitor(const void *eax, unsigned long ecx,
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index f57658702571..b8ebd0b689b1 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -490,8 +490,8 @@ void *text_poke_early(void *addr, const void *opcode, size_t len)
 	unsigned long flags;
 	local_irq_save(flags);
 	memcpy(addr, opcode, len);
-	local_irq_restore(flags);
 	sync_core();
+	local_irq_restore(flags);
 	/* Could also do a CLFLUSH here to speed up CPU recovery; but
 	   that causes hangs on some VIA CPUs. */
 	return addr;