author    Peter Zijlstra <peterz@infradead.org>  2016-12-08 10:42:14 -0500
committer Ingo Molnar <mingo@kernel.org>         2016-12-11 07:09:19 -0500
commit    45dbea5f55c05980cbb4c30047c71a820cd3f282 (patch)
tree      9b98e94730b7b533b7cd03c478ac2ffef3162aa4
parent    6f38751510073cc054c5dffc3339b22f9385ceed (diff)
x86/paravirt: Fix native_patch()
While chasing a regression I noticed we potentially patch the wrong
code in native_patch().

If we do not select the native code sequence, we must use the default
patcher, not fall through the switch case.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Alok Kataria <akataria@vmware.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Chris Wright <chrisw@sous-sol.org>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Pan Xinhui <xinhui.pan@linux.vnet.ibm.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Peter Anvin <hpa@zytor.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: kernel test robot <xiaolong.ye@intel.com>
Fixes: 3cded4179481 ("x86/paravirt: Optimize native pv_lock_ops.vcpu_is_preempted()")
Link: http://lkml.kernel.org/r/20161208154349.270616999@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
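The bug is easiest to see as a control-flow issue: each lock-ops case only
jumps to the patch site when the native sequence is selected, and before this
commit a non-native selection simply fell through into the next case label.
The following is a minimal standalone C sketch of that pattern (hypothetical
names and values, not the kernel code) showing why the added
"goto patch_default" lines are needed:

/*
 * Sketch of the fall-through bug fixed by this commit. The names
 * patch()/OP_UNLOCK/native_unlock are invented for illustration.
 */
#include <stdio.h>
#include <stdbool.h>

enum op { OP_UNLOCK, OP_VCPU_IS_PREEMPTED };

static bool native_unlock;              /* false: pv override active  */
static bool native_preempted = true;    /* true: native code selected */

static const char *patch(enum op type)
{
	switch (type) {
	case OP_UNLOCK:
		if (native_unlock)
			return "native unlock sequence";
		goto patch_default;     /* the fix: without this line we
					 * fall through into the next
					 * case below */
	case OP_VCPU_IS_PREEMPTED:
		if (native_preempted)
			return "native vcpu_is_preempted sequence";
		goto patch_default;
	default:
patch_default:
		return "default patch";
	}
}

int main(void)
{
	/* Fixed: OP_UNLOCK prints "default patch". With the first goto
	 * removed, it would wrongly print the native vcpu_is_preempted
	 * sequence of the following case. */
	printf("OP_UNLOCK            -> %s\n", patch(OP_UNLOCK));
	printf("OP_VCPU_IS_PREEMPTED -> %s\n", patch(OP_VCPU_IS_PREEMPTED));
	return 0;
}

Putting the patch_default label on the default case, rather than duplicating
the call, keeps a single call site for paravirt_patch_default() in both the
32-bit and 64-bit variants.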
-rw-r--r--  arch/x86/kernel/paravirt_patch_32.c | 4 ++++
-rw-r--r--  arch/x86/kernel/paravirt_patch_64.c | 4 ++++
2 files changed, 8 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kernel/paravirt_patch_32.c b/arch/x86/kernel/paravirt_patch_32.c
index ff03dbd28625..33cdec221f3d 100644
--- a/arch/x86/kernel/paravirt_patch_32.c
+++ b/arch/x86/kernel/paravirt_patch_32.c
@@ -58,15 +58,19 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
 			end = end_pv_lock_ops_queued_spin_unlock;
 			goto patch_site;
 		}
+		goto patch_default;
+
 	case PARAVIRT_PATCH(pv_lock_ops.vcpu_is_preempted):
 		if (pv_is_native_vcpu_is_preempted()) {
 			start = start_pv_lock_ops_vcpu_is_preempted;
 			end = end_pv_lock_ops_vcpu_is_preempted;
 			goto patch_site;
 		}
+		goto patch_default;
 #endif
 
 	default:
+patch_default:
 		ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
 		break;
 
diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
index e61dd9791f4f..b0fceff502b3 100644
--- a/arch/x86/kernel/paravirt_patch_64.c
+++ b/arch/x86/kernel/paravirt_patch_64.c
@@ -70,15 +70,19 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
 			end = end_pv_lock_ops_queued_spin_unlock;
 			goto patch_site;
 		}
+		goto patch_default;
+
 	case PARAVIRT_PATCH(pv_lock_ops.vcpu_is_preempted):
 		if (pv_is_native_vcpu_is_preempted()) {
 			start = start_pv_lock_ops_vcpu_is_preempted;
 			end = end_pv_lock_ops_vcpu_is_preempted;
 			goto patch_site;
 		}
+		goto patch_default;
 #endif
 
 	default:
+patch_default:
 		ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
 		break;
 