author     Jan Kiszka <jan.kiszka@siemens.com>        2008-12-15 07:52:10 -0500
committer  Avi Kivity <avi@redhat.com>                2009-03-24 05:02:49 -0400
commit     42dbaa5a057736bf8b5c22aa42dbe975bf1080e5 (patch)
tree       a7e625373c1ff7477e8f6f3cd835f633f161689f /arch/x86/kvm/svm.c
parent     55934c0bd3bb232a9cf902820dd63ad18ed65e49 (diff)
KVM: x86: Virtualize debug registers
So far KVM only had basic x86 debug register support, originally introduced to
let the host debug the guest via those registers. The guest itself was not
able to use them.
This patch now adds (almost) full support for guest self-debugging via
hardware registers. It refactors the code, moving generic parts out of
SVM (VMX was already cleaned up by the KVM_SET_GUEST_DEBUG patches), and
it ensures that the registers are properly switched between host and
guest.
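To make the mechanism concrete: from inside the VM, guest self-debugging is
just the usual architectural programming of DR0-DR3 and DR7, which this patch
now virtualizes. A minimal, purely illustrative sketch (the helper below is
hypothetical and not part of this patch):

/*
 * Illustrative only: how a guest kernel could arm a hardware execution
 * breakpoint on itself once this patch is in place.  DR7 bit 0 (L0)
 * enables breakpoint 0 locally; R/W0 = LEN0 = 0 selects an instruction
 * breakpoint; bit 10 of DR7 is architecturally reserved-to-one.
 */
static inline void guest_arm_hw_breakpoint(unsigned long addr)
{
	asm volatile ("mov %0, %%dr0" : : "r" (addr));
	asm volatile ("mov %0, %%dr7" : : "r" (0x401UL));
}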
This patch also prepares for debug register usage by the host. Once wired up
by the following patch, this will allow hardware breakpoints/watchpoints to be
planted in guest code. When that mode is enabled, the guest only sees faked
debug registers without functionality, but with content that reflects its own
modifications.
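For reference, enabling that host-side mode from userspace goes through the
KVM_SET_GUEST_DEBUG ioctl introduced by the series mentioned above. A rough
sketch, assuming the UAPI field names (control, arch.debugreg[]) from that
series; the helper name is made up and error handling is omitted:

/*
 * Hypothetical VMM-side snippet: plant a hardware breakpoint in guest
 * code via KVM_SET_GUEST_DEBUG.  With KVM_GUESTDBG_USE_HW_BP set, the
 * guest only sees the faked debug registers described above.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <string.h>

static int set_guest_hw_breakpoint(int vcpu_fd, __u64 guest_addr)
{
	struct kvm_guest_debug dbg;

	memset(&dbg, 0, sizeof(dbg));
	dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
	dbg.arch.debugreg[0] = guest_addr;	/* DR0: address to trap on */
	dbg.arch.debugreg[7] = 0x1;		/* DR7: enable breakpoint 0 */
	return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
}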
Tested on Intel only so far; SVM /should/ work as well, but who knows...
Known limitation: trapping on TSS switch won't work - most probably on
Intel.
Credits also go to Joerg Roedel - I used his previously posted debugging
series as the basis for this patch.
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/svm.c')
-rw-r--r--  arch/x86/kvm/svm.c  116
1 file changed, 43 insertions(+), 73 deletions(-)
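For orientation before the diff: the new svm_set_dr() below sanitizes guest
DR6/DR7 writes by keeping only the guest-writable ("volatile") bits and
forcing the architecturally reserved-to-one bits. A small sketch of that
masking; the numeric mask values are taken from the kvm_host.h change in this
series, which is outside this file's diff, so treat them as an assumption
here:

/*
 * Sketch of the DR6/DR7 sanitization used in the new svm_set_dr() below.
 * Mask values assumed from the kvm_host.h change in this series.
 */
#define DR6_FIXED_1	0xffff0ff0	/* reserved bits that read as 1 */
#define DR6_VOLATILE	0x0000e00f	/* B0-B3, BD, BS, BT */
#define DR7_FIXED_1	0x00000400	/* bit 10 is reserved-to-one */
#define DR7_VOLATILE	0xffff23ff	/* enables, LE/GE, GD, R/W and LEN fields */

static inline unsigned long sanitize_dr6(unsigned long value)
{
	return (value & DR6_VOLATILE) | DR6_FIXED_1;
}

static inline unsigned long sanitize_dr7(unsigned long value)
{
	return (value & DR7_VOLATILE) | DR7_FIXED_1;
}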
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 88d9062f4545..815f50e425ac 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -38,9 +38,6 @@ MODULE_LICENSE("GPL");
 #define IOPM_ALLOC_ORDER 2
 #define MSRPM_ALLOC_ORDER 1
 
-#define DR7_GD_MASK (1 << 13)
-#define DR6_BD_MASK (1 << 13)
-
 #define SEG_TYPE_LDT 2
 #define SEG_TYPE_BUSY_TSS16 3
 
@@ -181,32 +178,6 @@ static inline void kvm_write_cr2(unsigned long val)
 	asm volatile ("mov %0, %%cr2" :: "r" (val));
 }
 
-static inline unsigned long read_dr6(void)
-{
-	unsigned long dr6;
-
-	asm volatile ("mov %%dr6, %0" : "=r" (dr6));
-	return dr6;
-}
-
-static inline void write_dr6(unsigned long val)
-{
-	asm volatile ("mov %0, %%dr6" :: "r" (val));
-}
-
-static inline unsigned long read_dr7(void)
-{
-	unsigned long dr7;
-
-	asm volatile ("mov %%dr7, %0" : "=r" (dr7));
-	return dr7;
-}
-
-static inline void write_dr7(unsigned long val)
-{
-	asm volatile ("mov %0, %%dr7" :: "r" (val));
-}
-
 static inline void force_new_asid(struct kvm_vcpu *vcpu)
 {
 	to_svm(vcpu)->asid_generation--;
@@ -695,7 +666,6 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 	clear_page(svm->vmcb);
 	svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
 	svm->asid_generation = 0;
-	memset(svm->db_regs, 0, sizeof(svm->db_regs));
 	init_vmcb(svm);
 
 	fx_init(&svm->vcpu);
@@ -1035,7 +1005,29 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *svm_data)
 
 static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
 {
-	unsigned long val = to_svm(vcpu)->db_regs[dr];
+	struct vcpu_svm *svm = to_svm(vcpu);
+	unsigned long val;
+
+	switch (dr) {
+	case 0 ... 3:
+		val = vcpu->arch.db[dr];
+		break;
+	case 6:
+		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
+			val = vcpu->arch.dr6;
+		else
+			val = svm->vmcb->save.dr6;
+		break;
+	case 7:
+		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
+			val = vcpu->arch.dr7;
+		else
+			val = svm->vmcb->save.dr7;
+		break;
+	default:
+		val = 0;
+	}
+
 	KVMTRACE_2D(DR_READ, vcpu, (u32)dr, (u32)val, handler);
 	return val;
 }
@@ -1045,33 +1037,40 @@ static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
-	*exception = 0;
+	KVMTRACE_2D(DR_WRITE, vcpu, (u32)dr, (u32)value, handler);
 
-	if (svm->vmcb->save.dr7 & DR7_GD_MASK) {
-		svm->vmcb->save.dr7 &= ~DR7_GD_MASK;
-		svm->vmcb->save.dr6 |= DR6_BD_MASK;
-		*exception = DB_VECTOR;
-		return;
-	}
+	*exception = 0;
 
 	switch (dr) {
 	case 0 ... 3:
-		svm->db_regs[dr] = value;
+		vcpu->arch.db[dr] = value;
+		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
+			vcpu->arch.eff_db[dr] = value;
 		return;
 	case 4 ... 5:
-		if (vcpu->arch.cr4 & X86_CR4_DE) {
+		if (vcpu->arch.cr4 & X86_CR4_DE)
 			*exception = UD_VECTOR;
+		return;
+	case 6:
+		if (value & 0xffffffff00000000ULL) {
+			*exception = GP_VECTOR;
 			return;
 		}
-	case 7: {
-		if (value & ~((1ULL << 32) - 1)) {
+		vcpu->arch.dr6 = (value & DR6_VOLATILE) | DR6_FIXED_1;
+		return;
+	case 7:
+		if (value & 0xffffffff00000000ULL) {
 			*exception = GP_VECTOR;
 			return;
 		}
-		svm->vmcb->save.dr7 = value;
+		vcpu->arch.dr7 = (value & DR7_VOLATILE) | DR7_FIXED_1;
+		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
+			svm->vmcb->save.dr7 = vcpu->arch.dr7;
+			vcpu->arch.switch_db_regs = (value & DR7_BP_EN_MASK);
+		}
 		return;
-	}
 	default:
+		/* FIXME: Possible case? */
 		printk(KERN_DEBUG "%s: unexpected dr %u\n",
 		       __func__, dr);
 		*exception = UD_VECTOR;
@@ -2365,22 +2364,6 @@ static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
 	return 0;
 }
 
-static void save_db_regs(unsigned long *db_regs)
-{
-	asm volatile ("mov %%dr0, %0" : "=r"(db_regs[0]));
-	asm volatile ("mov %%dr1, %0" : "=r"(db_regs[1]));
-	asm volatile ("mov %%dr2, %0" : "=r"(db_regs[2]));
-	asm volatile ("mov %%dr3, %0" : "=r"(db_regs[3]));
-}
-
-static void load_db_regs(unsigned long *db_regs)
-{
-	asm volatile ("mov %0, %%dr0" : : "r"(db_regs[0]));
-	asm volatile ("mov %0, %%dr1" : : "r"(db_regs[1]));
-	asm volatile ("mov %0, %%dr2" : : "r"(db_regs[2]));
-	asm volatile ("mov %0, %%dr3" : : "r"(db_regs[3]));
-}
-
 static void svm_flush_tlb(struct kvm_vcpu *vcpu)
 {
 	force_new_asid(vcpu);
@@ -2439,20 +2422,12 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	gs_selector = kvm_read_gs();
 	ldt_selector = kvm_read_ldt();
 	svm->host_cr2 = kvm_read_cr2();
-	svm->host_dr6 = read_dr6();
-	svm->host_dr7 = read_dr7();
 	if (!is_nested(svm))
 		svm->vmcb->save.cr2 = vcpu->arch.cr2;
 	/* required for live migration with NPT */
 	if (npt_enabled)
 		svm->vmcb->save.cr3 = vcpu->arch.cr3;
 
-	if (svm->vmcb->save.dr7 & 0xff) {
-		write_dr7(0);
-		save_db_regs(svm->host_db_regs);
-		load_db_regs(svm->db_regs);
-	}
-
 	clgi();
 
 	local_irq_enable();
@@ -2528,16 +2503,11 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 #endif
 	);
 
-	if ((svm->vmcb->save.dr7 & 0xff))
-		load_db_regs(svm->host_db_regs);
-
 	vcpu->arch.cr2 = svm->vmcb->save.cr2;
 	vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
 	vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
 	vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
 
-	write_dr6(svm->host_dr6);
-	write_dr7(svm->host_dr7);
 	kvm_write_cr2(svm->host_cr2);
 
 	kvm_load_fs(fs_selector);