aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorRoman Pen <roman.penyaev@profitbricks.com>2017-06-01 04:55:03 -0400
committerPaolo Bonzini <pbonzini@redhat.com>2017-06-01 05:21:17 -0400
commitd9c1b5431d5f0e07575db785a022bce91051ac1d (patch)
tree3e8b000a15b788298f7f78f8ae56d80b947faecc
parent8eae9570d1d3887487be0b355d12656b46fac226 (diff)
KVM: SVM: do not zero out segment attributes if segment is unusable or not present
This is a fix for the problem [1], where VMCB.CPL was set to 0 and an interrupt was taken on the userspace stack. The root cause lies in the specific AMD CPU behaviour which manifests itself as unusable segment attributes on SYSRET. The corresponding workaround for the kernel is the following: 61f01dd941ba ("x86_64, asm: Work around AMD SYSRET SS descriptor attribute issue") In turn, the virtualization side treated the unusable segment incorrectly and restored CPL from SS attributes, which were zeroed out a few lines above. In the current patch it is assured only that the P bit is cleared in the VMCB.save state and segment attributes are not zeroed out if the segment is not present or is unusable, therefore CPL can be safely restored from the DPL field. This is only one part of the fix, since the QEMU side should be fixed accordingly not to zero out attributes on its side. A corresponding patch will follow. [1] Message id: CAJrWOzD6Xq==b-zYCDdFLgSRMPM-NkNuTSDFEtX=7MreT45i7Q@mail.gmail.com Signed-off-by: Roman Pen <roman.penyaev@profitbricks.com> Signed-off-by: Mikhail Sennikovskii <mikhail.sennikovskii@profitbricks.com> Cc: Paolo Bonzini <pbonzini@redhat.com> Cc: Radim Krčmář <rkrcmar@redhat.com> Cc: kvm@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-rw-r--r--arch/x86/kvm/svm.c24
1 file changed, 11 insertions, 13 deletions
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index a654372efea1..ba9891ac5c56 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1840,6 +1840,7 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
1840 */ 1840 */
1841 if (var->unusable) 1841 if (var->unusable)
1842 var->db = 0; 1842 var->db = 0;
1843 /* This is symmetric with svm_set_segment() */
1843 var->dpl = to_svm(vcpu)->vmcb->save.cpl; 1844 var->dpl = to_svm(vcpu)->vmcb->save.cpl;
1844 break; 1845 break;
1845 } 1846 }
@@ -1980,18 +1981,14 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
1980 s->base = var->base; 1981 s->base = var->base;
1981 s->limit = var->limit; 1982 s->limit = var->limit;
1982 s->selector = var->selector; 1983 s->selector = var->selector;
1983 if (var->unusable) 1984 s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
1984 s->attrib = 0; 1985 s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
1985 else { 1986 s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
1986 s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK); 1987 s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT;
1987 s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT; 1988 s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
1988 s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT; 1989 s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
1989 s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT; 1990 s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
1990 s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT; 1991 s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
1991 s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
1992 s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
1993 s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
1994 }
1995 1992
1996 /* 1993 /*
1997 * This is always accurate, except if SYSRET returned to a segment 1994 * This is always accurate, except if SYSRET returned to a segment
@@ -2000,7 +1997,8 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
2000 * would entail passing the CPL to userspace and back. 1997 * would entail passing the CPL to userspace and back.
2001 */ 1998 */
2002 if (seg == VCPU_SREG_SS) 1999 if (seg == VCPU_SREG_SS)
2003 svm->vmcb->save.cpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3; 2000 /* This is symmetric with svm_get_segment() */
2001 svm->vmcb->save.cpl = (var->dpl & 3);
2004 2002
2005 mark_dirty(svm->vmcb, VMCB_SEG); 2003 mark_dirty(svm->vmcb, VMCB_SEG);
2006} 2004}