author     Roedel, Joerg <Joerg.Roedel@amd.com>   2010-12-03 04:50:51 -0500
committer  Avi Kivity <avi@redhat.com>            2011-01-12 04:30:10 -0500
commit     4ee546b434504a618eac40421e595c68e494da9f (patch)
tree       ce278bb56d8b9d3f7f3fa42ed10a3a1ecf8ce5a7
parent     384c636843971c8ebbffd1cc8881e3184cbd23e2 (diff)
KVM: SVM: Add manipulation functions for CRx intercepts
This patch wraps changes to the CRx intercepts of SVM into separate functions to abstract nested-svm better and to prepare the implementation of the vmcb-clean-bits feature.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
-rw-r--r--  arch/x86/include/asm/svm.h  |  15
-rw-r--r--  arch/x86/kvm/svm.c          | 120
2 files changed, 73 insertions, 62 deletions
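
For orientation, here is a small standalone sketch (not kernel code) of the scheme this patch introduces: the old u16 intercept_cr_read/intercept_cr_write pair becomes a single u32 intercept_cr, with CRx read intercepts in bits 0-15 and CRx write intercepts in bits 16-31, manipulated through set/clr/is helpers. The struct and helpers below are simplified stand-ins; the real kernel versions also select the host VMCB for nested guests via get_host_vmcb() and call recalc_intercepts(), which this sketch omits.

/*
 * Standalone sketch (userspace, not kernel code) of the intercept_cr
 * layout introduced by this patch: read intercepts in bits 0-15,
 * write intercepts in bits 16-31 of one u32 word.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Bit positions, as defined in the patched svm.h */
#define INTERCEPT_CR0_READ	0
#define INTERCEPT_CR3_READ	3
#define INTERCEPT_CR4_READ	4
#define INTERCEPT_CR8_READ	8
#define INTERCEPT_CR0_WRITE	(16 + 0)
#define INTERCEPT_CR3_WRITE	(16 + 3)
#define INTERCEPT_CR4_WRITE	(16 + 4)
#define INTERCEPT_CR8_WRITE	(16 + 8)

struct fake_control {
	uint32_t intercept_cr;	/* replaces the old u16 read/write pair */
};

static void set_cr_intercept(struct fake_control *c, int bit)
{
	c->intercept_cr |= 1U << bit;
}

static void clr_cr_intercept(struct fake_control *c, int bit)
{
	c->intercept_cr &= ~(1U << bit);
}

static bool is_cr_intercept(const struct fake_control *c, int bit)
{
	return c->intercept_cr & (1U << bit);
}

int main(void)
{
	struct fake_control c = { .intercept_cr = 0 };

	/* The same set of intercepts init_vmcb() enables after this patch */
	set_cr_intercept(&c, INTERCEPT_CR0_READ);
	set_cr_intercept(&c, INTERCEPT_CR3_READ);
	set_cr_intercept(&c, INTERCEPT_CR4_READ);
	set_cr_intercept(&c, INTERCEPT_CR0_WRITE);
	set_cr_intercept(&c, INTERCEPT_CR3_WRITE);
	set_cr_intercept(&c, INTERCEPT_CR4_WRITE);
	set_cr_intercept(&c, INTERCEPT_CR8_WRITE);

	/* Low half == old intercept_cr_read, high half == old intercept_cr_write */
	printf("cr_read:  %04x\n", (unsigned int)(c.intercept_cr & 0xffff));
	printf("cr_write: %04x\n", (unsigned int)(c.intercept_cr >> 16));

	assert(is_cr_intercept(&c, INTERCEPT_CR4_WRITE));
	clr_cr_intercept(&c, INTERCEPT_CR8_WRITE);
	assert(!is_cr_intercept(&c, INTERCEPT_CR8_WRITE));

	return 0;
}

The printed values (0019 and 0119) are what the old separate u16 fields would have held for the same set of intercepts, which is exactly the split that dump_vmcb() and the nested-intercept tracepoint recover below with "intercept_cr & 0xffff" and "intercept_cr >> 16".
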
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 0e831059ac5a..39f9ddf07136 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -51,8 +51,7 @@ enum {
 
 
 struct __attribute__ ((__packed__)) vmcb_control_area {
-	u16 intercept_cr_read;
-	u16 intercept_cr_write;
+	u32 intercept_cr;
 	u16 intercept_dr_read;
 	u16 intercept_dr_write;
 	u32 intercept_exceptions;
@@ -204,10 +203,14 @@ struct __attribute__ ((__packed__)) vmcb {
 #define SVM_SELECTOR_READ_MASK SVM_SELECTOR_WRITE_MASK
 #define SVM_SELECTOR_CODE_MASK (1 << 3)
 
-#define INTERCEPT_CR0_MASK 1
-#define INTERCEPT_CR3_MASK (1 << 3)
-#define INTERCEPT_CR4_MASK (1 << 4)
-#define INTERCEPT_CR8_MASK (1 << 8)
+#define INTERCEPT_CR0_READ 0
+#define INTERCEPT_CR3_READ 3
+#define INTERCEPT_CR4_READ 4
+#define INTERCEPT_CR8_READ 8
+#define INTERCEPT_CR0_WRITE (16 + 0)
+#define INTERCEPT_CR3_WRITE (16 + 3)
+#define INTERCEPT_CR4_WRITE (16 + 4)
+#define INTERCEPT_CR8_WRITE (16 + 8)
 
 #define INTERCEPT_DR0_MASK 1
 #define INTERCEPT_DR1_MASK (1 << 1)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 74f89f0b9e3f..1e7bb9c77084 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -98,8 +98,7 @@ struct nested_state {
 	unsigned long vmexit_rax;
 
 	/* cache for intercepts of the guest */
-	u16 intercept_cr_read;
-	u16 intercept_cr_write;
+	u32 intercept_cr;
 	u16 intercept_dr_read;
 	u16 intercept_dr_write;
 	u32 intercept_exceptions;
@@ -204,14 +203,46 @@ static void recalc_intercepts(struct vcpu_svm *svm)
 	h = &svm->nested.hsave->control;
 	g = &svm->nested;
 
-	c->intercept_cr_read = h->intercept_cr_read | g->intercept_cr_read;
-	c->intercept_cr_write = h->intercept_cr_write | g->intercept_cr_write;
+	c->intercept_cr = h->intercept_cr | g->intercept_cr;
 	c->intercept_dr_read = h->intercept_dr_read | g->intercept_dr_read;
 	c->intercept_dr_write = h->intercept_dr_write | g->intercept_dr_write;
 	c->intercept_exceptions = h->intercept_exceptions | g->intercept_exceptions;
 	c->intercept = h->intercept | g->intercept;
 }
 
+static inline struct vmcb *get_host_vmcb(struct vcpu_svm *svm)
+{
+	if (is_guest_mode(&svm->vcpu))
+		return svm->nested.hsave;
+	else
+		return svm->vmcb;
+}
+
+static inline void set_cr_intercept(struct vcpu_svm *svm, int bit)
+{
+	struct vmcb *vmcb = get_host_vmcb(svm);
+
+	vmcb->control.intercept_cr |= (1U << bit);
+
+	recalc_intercepts(svm);
+}
+
+static inline void clr_cr_intercept(struct vcpu_svm *svm, int bit)
+{
+	struct vmcb *vmcb = get_host_vmcb(svm);
+
+	vmcb->control.intercept_cr &= ~(1U << bit);
+
+	recalc_intercepts(svm);
+}
+
+static inline bool is_cr_intercept(struct vcpu_svm *svm, int bit)
+{
+	struct vmcb *vmcb = get_host_vmcb(svm);
+
+	return vmcb->control.intercept_cr & (1U << bit);
+}
+
 static inline void enable_gif(struct vcpu_svm *svm)
 {
 	svm->vcpu.arch.hflags |= HF_GIF_MASK;
@@ -766,15 +797,15 @@ static void init_vmcb(struct vcpu_svm *svm)
 	struct vmcb_save_area *save = &svm->vmcb->save;
 
 	svm->vcpu.fpu_active = 1;
+	svm->vcpu.arch.hflags = 0;
 
-	control->intercept_cr_read = INTERCEPT_CR0_MASK |
-					INTERCEPT_CR3_MASK |
-					INTERCEPT_CR4_MASK;
-
-	control->intercept_cr_write = INTERCEPT_CR0_MASK |
-					INTERCEPT_CR3_MASK |
-					INTERCEPT_CR4_MASK |
-					INTERCEPT_CR8_MASK;
+	set_cr_intercept(svm, INTERCEPT_CR0_READ);
+	set_cr_intercept(svm, INTERCEPT_CR3_READ);
+	set_cr_intercept(svm, INTERCEPT_CR4_READ);
+	set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
+	set_cr_intercept(svm, INTERCEPT_CR3_WRITE);
+	set_cr_intercept(svm, INTERCEPT_CR4_WRITE);
+	set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
 
 	control->intercept_dr_read = INTERCEPT_DR0_MASK |
 					INTERCEPT_DR1_MASK |
@@ -875,8 +906,8 @@ static void init_vmcb(struct vcpu_svm *svm)
 		control->intercept &= ~((1ULL << INTERCEPT_TASK_SWITCH) |
 					(1ULL << INTERCEPT_INVLPG));
 		control->intercept_exceptions &= ~(1 << PF_VECTOR);
-		control->intercept_cr_read &= ~INTERCEPT_CR3_MASK;
-		control->intercept_cr_write &= ~INTERCEPT_CR3_MASK;
+		clr_cr_intercept(svm, INTERCEPT_CR3_READ);
+		clr_cr_intercept(svm, INTERCEPT_CR3_WRITE);
 		save->g_pat = 0x0007040600070406ULL;
 		save->cr3 = 0;
 		save->cr4 = 0;
@@ -1210,7 +1241,6 @@ static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
 
 static void update_cr0_intercept(struct vcpu_svm *svm)
 {
-	struct vmcb *vmcb = svm->vmcb;
 	ulong gcr0 = svm->vcpu.arch.cr0;
 	u64 *hcr0 = &svm->vmcb->save.cr0;
 
@@ -1222,25 +1252,11 @@ static void update_cr0_intercept(struct vcpu_svm *svm)
 
 
 	if (gcr0 == *hcr0 && svm->vcpu.fpu_active) {
-		vmcb->control.intercept_cr_read &= ~INTERCEPT_CR0_MASK;
-		vmcb->control.intercept_cr_write &= ~INTERCEPT_CR0_MASK;
-		if (is_guest_mode(&svm->vcpu)) {
-			struct vmcb *hsave = svm->nested.hsave;
-
-			hsave->control.intercept_cr_read &= ~INTERCEPT_CR0_MASK;
-			hsave->control.intercept_cr_write &= ~INTERCEPT_CR0_MASK;
-			vmcb->control.intercept_cr_read |= svm->nested.intercept_cr_read;
-			vmcb->control.intercept_cr_write |= svm->nested.intercept_cr_write;
-		}
+		clr_cr_intercept(svm, INTERCEPT_CR0_READ);
+		clr_cr_intercept(svm, INTERCEPT_CR0_WRITE);
 	} else {
-		svm->vmcb->control.intercept_cr_read |= INTERCEPT_CR0_MASK;
-		svm->vmcb->control.intercept_cr_write |= INTERCEPT_CR0_MASK;
-		if (is_guest_mode(&svm->vcpu)) {
-			struct vmcb *hsave = svm->nested.hsave;
-
-			hsave->control.intercept_cr_read |= INTERCEPT_CR0_MASK;
-			hsave->control.intercept_cr_write |= INTERCEPT_CR0_MASK;
-		}
+		set_cr_intercept(svm, INTERCEPT_CR0_READ);
+		set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
 	}
 }
 
@@ -1901,15 +1917,9 @@ static int nested_svm_intercept(struct vcpu_svm *svm)
 	case SVM_EXIT_IOIO:
 		vmexit = nested_svm_intercept_ioio(svm);
 		break;
-	case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR8: {
-		u32 cr_bits = 1 << (exit_code - SVM_EXIT_READ_CR0);
-		if (svm->nested.intercept_cr_read & cr_bits)
-			vmexit = NESTED_EXIT_DONE;
-		break;
-	}
-	case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR8: {
-		u32 cr_bits = 1 << (exit_code - SVM_EXIT_WRITE_CR0);
-		if (svm->nested.intercept_cr_write & cr_bits)
+	case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
+		u32 bit = 1U << (exit_code - SVM_EXIT_READ_CR0);
+		if (svm->nested.intercept_cr & bit)
 			vmexit = NESTED_EXIT_DONE;
 		break;
 	}
@@ -1966,8 +1976,7 @@ static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *fr
 	struct vmcb_control_area *dst = &dst_vmcb->control;
 	struct vmcb_control_area *from = &from_vmcb->control;
 
-	dst->intercept_cr_read = from->intercept_cr_read;
-	dst->intercept_cr_write = from->intercept_cr_write;
+	dst->intercept_cr = from->intercept_cr;
 	dst->intercept_dr_read = from->intercept_dr_read;
 	dst->intercept_dr_write = from->intercept_dr_write;
 	dst->intercept_exceptions = from->intercept_exceptions;
@@ -2189,8 +2198,8 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
 			       nested_vmcb->control.event_inj,
 			       nested_vmcb->control.nested_ctl);
 
-	trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr_read,
-				    nested_vmcb->control.intercept_cr_write,
+	trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr & 0xffff,
+				    nested_vmcb->control.intercept_cr >> 16,
 				    nested_vmcb->control.intercept_exceptions,
 				    nested_vmcb->control.intercept);
 
@@ -2270,8 +2279,7 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
 	svm->nested.vmcb_iopm = nested_vmcb->control.iopm_base_pa & ~0x0fffULL;
 
 	/* cache intercepts */
-	svm->nested.intercept_cr_read = nested_vmcb->control.intercept_cr_read;
-	svm->nested.intercept_cr_write = nested_vmcb->control.intercept_cr_write;
+	svm->nested.intercept_cr = nested_vmcb->control.intercept_cr;
 	svm->nested.intercept_dr_read = nested_vmcb->control.intercept_dr_read;
 	svm->nested.intercept_dr_write = nested_vmcb->control.intercept_dr_write;
 	svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions;
@@ -2286,8 +2294,8 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
 
 	if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
 		/* We only want the cr8 intercept bits of the guest */
-		svm->vmcb->control.intercept_cr_read &= ~INTERCEPT_CR8_MASK;
-		svm->vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK;
+		clr_cr_intercept(svm, INTERCEPT_CR8_READ);
+		clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
 	}
 
 	/* We don't want to see VMMCALLs from a nested guest */
@@ -2579,7 +2587,7 @@ static int cr8_write_interception(struct vcpu_svm *svm)
 	/* instruction emulation calls kvm_set_cr8() */
 	emulate_instruction(&svm->vcpu, 0, 0, 0);
 	if (irqchip_in_kernel(svm->vcpu.kvm)) {
-		svm->vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK;
+		clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
 		return 1;
 	}
 	if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
@@ -2896,8 +2904,8 @@ void dump_vmcb(struct kvm_vcpu *vcpu)
 	struct vmcb_save_area *save = &svm->vmcb->save;
 
 	pr_err("VMCB Control Area:\n");
-	pr_err("cr_read: %04x\n", control->intercept_cr_read);
-	pr_err("cr_write: %04x\n", control->intercept_cr_write);
+	pr_err("cr_read: %04x\n", control->intercept_cr & 0xffff);
+	pr_err("cr_write: %04x\n", control->intercept_cr >> 16);
 	pr_err("dr_read: %04x\n", control->intercept_dr_read);
 	pr_err("dr_write: %04x\n", control->intercept_dr_write);
 	pr_err("exceptions: %08x\n", control->intercept_exceptions);
@@ -2998,7 +3006,7 @@ static int handle_exit(struct kvm_vcpu *vcpu)
 
 	trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM);
 
-	if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR0_MASK))
+	if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE))
 		vcpu->arch.cr0 = svm->vmcb->save.cr0;
 	if (npt_enabled)
 		vcpu->arch.cr3 = svm->vmcb->save.cr3;
@@ -3124,7 +3132,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
 		return;
 
 	if (tpr >= irr)
-		svm->vmcb->control.intercept_cr_write |= INTERCEPT_CR8_MASK;
+		set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
 }
 
 static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
@@ -3231,7 +3239,7 @@ static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
 	if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
 		return;
 
-	if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR8_MASK)) {
+	if (!is_cr_intercept(svm, INTERCEPT_CR8_WRITE)) {
 		int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
 		kvm_set_cr8(vcpu, cr8);
 	}