author		Joerg Roedel <joerg.roedel@amd.com>	2010-02-24 12:59:10 -0500
committer	Avi Kivity <avi@redhat.com>		2010-04-25 06:52:58 -0400
commit		e02317153e77150fed9609c3212c98204ec3ea74 (patch)
tree		7c573611793b0453f59b044369f073495ddedb2e
parent		83bf0002c91b65744db78df36d4f1af27bd9099b (diff)
KVM: SVM: Coding style cleanup
This patch removes whitespace errors, fixes comment formats and most of
checkpatch warnings. Now vim does not show c-space-errors anymore.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
-rw-r--r--	arch/x86/kvm/svm.c	148
1 file changed, 81 insertions(+), 67 deletions(-)
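For anyone reproducing this cleanup, the two tools the message refers to are
driven as sketched below (typical usage; the exact invocations are an
assumption, not part of the commit):

        # Kernel style checker, run over a whole file rather than a patch:
        scripts/checkpatch.pl -f arch/x86/kvm/svm.c

        " In ~/.vimrc: have vim's C syntax highlighting flag space-before-tab
        " and trailing whitespace, the c-space-errors the message mentions:
        let c_space_errors = 1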
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 468ff6e721ce..44679530ad5d 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -120,7 +120,7 @@ struct vcpu_svm {
 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
 static bool npt_enabled = true;
 #else
-static bool npt_enabled = false;
+static bool npt_enabled;
 #endif
 static int npt = 1;
 
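The npt_enabled hunk above applies checkpatch's rule against initialising
statics to 0 or false: C guarantees zero-initialisation for objects with
static storage duration, so the explicit initialiser is redundant. A minimal
standalone illustration (plain userspace C, not kernel code):

        #include <stdbool.h>
        #include <stdio.h>

        static bool flag;       /* static storage: guaranteed false, in .bss */
        static int count = 1;   /* non-zero initialisers stay explicit */

        int main(void)
        {
                printf("%d %d\n", flag, count); /* prints: 0 1 */
                return 0;
        }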
@@ -168,8 +168,8 @@ static unsigned long iopm_base;
 struct kvm_ldttss_desc {
         u16 limit0;
         u16 base0;
-        unsigned base1 : 8, type : 5, dpl : 2, p : 1;
-        unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
+        unsigned base1:8, type:5, dpl:2, p:1;
+        unsigned limit1:4, zero0:3, g:1, base2:8;
         u32 base3;
         u32 zero1;
 } __attribute__((packed));
@@ -218,7 +218,7 @@ static inline void stgi(void)
 
 static inline void invlpga(unsigned long addr, u32 asid)
 {
-        asm volatile (__ex(SVM_INVLPGA) :: "a"(addr), "c"(asid));
+        asm volatile (__ex(SVM_INVLPGA) : : "a"(addr), "c"(asid));
 }
 
 static inline void force_new_asid(struct kvm_vcpu *vcpu)
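The invlpga hunk only changes `::` to `: :`, making the empty output-operand
section of the extended asm explicit; after the template string come outputs,
inputs and clobbers, in that order. A self-contained sketch of the same
no-output pattern, using the well-known port-write instruction purely for
illustration:

        /* "outb %0, %1" produces no outputs, so the first ':' closes an
         * empty list; "a" pins value to al, "Nd" allows an immediate or
         * the dx register for the port. */
        static inline void outb_sketch(unsigned char value, unsigned short port)
        {
                asm volatile ("outb %0, %1" : : "a"(value), "Nd"(port));
        }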
@@ -290,8 +290,10 @@ static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
 {
         struct vcpu_svm *svm = to_svm(vcpu);
 
-        /* If we are within a nested VM we'd better #VMEXIT and let the
-           guest handle the exception */
+        /*
+         * If we are within a nested VM we'd better #VMEXIT and let the guest
+         * handle the exception
+         */
         if (nested_svm_check_exception(svm, nr, has_error_code, error_code))
                 return;
 
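Many hunks below repeat this one transformation, moving block comments to the
layout Documentation/CodingStyle prescribes: a column of asterisks with
almost-blank opening and closing lines, along these lines:

        /*
         * This is the preferred style for multi-line
         * comments in the Linux kernel source code.
         *
         * Description: a column of asterisks on the left side,
         * with beginning and ending almost-blank lines.
         */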
@@ -544,7 +546,7 @@ static void init_seg(struct vmcb_seg *seg)
 {
         seg->selector = 0;
         seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
-                SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
+                      SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
         seg->limit = 0xffff;
         seg->base = 0;
 }
@@ -564,16 +566,16 @@ static void init_vmcb(struct vcpu_svm *svm)
 
         svm->vcpu.fpu_active = 1;
 
         control->intercept_cr_read =    INTERCEPT_CR0_MASK |
                                         INTERCEPT_CR3_MASK |
                                         INTERCEPT_CR4_MASK;
 
         control->intercept_cr_write =   INTERCEPT_CR0_MASK |
                                         INTERCEPT_CR3_MASK |
                                         INTERCEPT_CR4_MASK |
                                         INTERCEPT_CR8_MASK;
 
         control->intercept_dr_read =    INTERCEPT_DR0_MASK |
                                         INTERCEPT_DR1_MASK |
                                         INTERCEPT_DR2_MASK |
                                         INTERCEPT_DR3_MASK |
@@ -582,7 +584,7 @@ static void init_vmcb(struct vcpu_svm *svm)
                                         INTERCEPT_DR6_MASK |
                                         INTERCEPT_DR7_MASK;
 
         control->intercept_dr_write =   INTERCEPT_DR0_MASK |
                                         INTERCEPT_DR1_MASK |
                                         INTERCEPT_DR2_MASK |
                                         INTERCEPT_DR3_MASK |
@@ -596,7 +598,7 @@ static void init_vmcb(struct vcpu_svm *svm)
                                 (1 << MC_VECTOR);
 
 
         control->intercept =    (1ULL << INTERCEPT_INTR) |
                                 (1ULL << INTERCEPT_NMI) |
                                 (1ULL << INTERCEPT_SMI) |
                                 (1ULL << INTERCEPT_SELECTIVE_CR0) |
@@ -657,7 +659,8 @@ static void init_vmcb(struct vcpu_svm *svm)
         save->rip = 0x0000fff0;
         svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;
 
-        /* This is the guest-visible cr0 value.
+        /*
+         * This is the guest-visible cr0 value.
          * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0.
          */
         svm->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
@@ -903,7 +906,8 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
         var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
         var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;
 
-        /* AMD's VMCB does not have an explicit unusable field, so emulate it
+        /*
+         * AMD's VMCB does not have an explicit unusable field, so emulate it
          * for cross vendor migration purposes by "not present"
          */
         var->unusable = !var->present || (var->type == 0);
@@ -939,7 +943,8 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
                 var->type |= 0x1;
                 break;
         case VCPU_SREG_SS:
-                /* On AMD CPUs sometimes the DB bit in the segment
+                /*
+                 * On AMD CPUs sometimes the DB bit in the segment
                  * descriptor is left as 1, although the whole segment has
                  * been made unusable. Clear it here to pass an Intel VMX
                  * entry check when cross vendor migrating.
@@ -1270,7 +1275,7 @@ static int db_interception(struct vcpu_svm *svm)
         }
 
         if (svm->vcpu.guest_debug &
-            (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)){
+            (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
                 kvm_run->exit_reason = KVM_EXIT_DEBUG;
                 kvm_run->debug.arch.pc =
                         svm->vmcb->save.cs.base + svm->vmcb->save.rip;
@@ -1315,7 +1320,7 @@ static void svm_fpu_activate(struct kvm_vcpu *vcpu)
                 excp = h_excp | n_excp;
         } else {
                 excp = svm->vmcb->control.intercept_exceptions;
                 excp &= ~(1 << NM_VECTOR);
         }
 
         svm->vmcb->control.intercept_exceptions = excp;
@@ -1554,13 +1559,13 @@ static int nested_svm_exit_special(struct vcpu_svm *svm)
         case SVM_EXIT_INTR:
         case SVM_EXIT_NMI:
                 return NESTED_EXIT_HOST;
-                /* For now we are always handling NPFs when using them */
         case SVM_EXIT_NPF:
+                /* For now we are always handling NPFs when using them */
                 if (npt_enabled)
                         return NESTED_EXIT_HOST;
                 break;
-                /* When we're shadowing, trap PFs */
         case SVM_EXIT_EXCP_BASE + PF_VECTOR:
+                /* When we're shadowing, trap PFs */
                 if (!npt_enabled)
                         return NESTED_EXIT_HOST;
                 break;
@@ -1795,7 +1800,7 @@ static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
         if (!nested_msrpm)
                 return false;
 
-        for (i=0; i< PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER) / 4; i++)
+        for (i = 0; i < PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER) / 4; i++)
                 svm->nested.msrpm[i] = svm->msrpm[i] | nested_msrpm[i];
 
         svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm);
@@ -1829,8 +1834,10 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
         kvm_clear_exception_queue(&svm->vcpu);
         kvm_clear_interrupt_queue(&svm->vcpu);
 
-        /* Save the old vmcb, so we don't need to pick what we save, but
-           can restore everything when a VMEXIT occurs */
+        /*
+         * Save the old vmcb, so we don't need to pick what we save, but can
+         * restore everything when a VMEXIT occurs
+         */
         hsave->save.es = vmcb->save.es;
         hsave->save.cs = vmcb->save.cs;
         hsave->save.ss = vmcb->save.ss;
@@ -1878,6 +1885,7 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
         kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, nested_vmcb->save.rax);
         kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, nested_vmcb->save.rsp);
         kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, nested_vmcb->save.rip);
+
         /* In case we don't even reach vcpu_run, the fields are not updated */
         svm->vmcb->save.rax = nested_vmcb->save.rax;
         svm->vmcb->save.rsp = nested_vmcb->save.rsp;
@@ -1909,8 +1917,10 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
                 svm->vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK;
         }
 
-        /* We don't want a nested guest to be more powerful than the guest,
-           so all intercepts are ORed */
+        /*
+         * We don't want a nested guest to be more powerful than the guest, so
+         * all intercepts are ORed
+         */
         svm->vmcb->control.intercept_cr_read |=
                 nested_vmcb->control.intercept_cr_read;
         svm->vmcb->control.intercept_cr_write |=
@@ -2224,9 +2234,11 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
         case MSR_IA32_SYSENTER_ESP:
                 *data = svm->sysenter_esp;
                 break;
-        /* Nobody will change the following 5 values in the VMCB so
-           we can safely return them on rdmsr. They will always be 0
-           until LBRV is implemented. */
+        /*
+         * Nobody will change the following 5 values in the VMCB so we can
+         * safely return them on rdmsr. They will always be 0 until LBRV is
+         * implemented.
+         */
         case MSR_IA32_DEBUGCTLMSR:
                 *data = svm->vmcb->save.dbgctl;
                 break;
@@ -2405,16 +2417,16 @@ static int pause_interception(struct vcpu_svm *svm)
 }
 
 static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
         [SVM_EXIT_READ_CR0]                     = emulate_on_interception,
         [SVM_EXIT_READ_CR3]                     = emulate_on_interception,
         [SVM_EXIT_READ_CR4]                     = emulate_on_interception,
         [SVM_EXIT_READ_CR8]                     = emulate_on_interception,
         [SVM_EXIT_CR0_SEL_WRITE]                = emulate_on_interception,
         [SVM_EXIT_WRITE_CR0]                    = emulate_on_interception,
         [SVM_EXIT_WRITE_CR3]                    = emulate_on_interception,
         [SVM_EXIT_WRITE_CR4]                    = emulate_on_interception,
         [SVM_EXIT_WRITE_CR8]                    = cr8_write_interception,
         [SVM_EXIT_READ_DR0]                     = emulate_on_interception,
         [SVM_EXIT_READ_DR1]                     = emulate_on_interception,
         [SVM_EXIT_READ_DR2]                     = emulate_on_interception,
         [SVM_EXIT_READ_DR3]                     = emulate_on_interception,
@@ -2433,15 +2445,14 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
         [SVM_EXIT_EXCP_BASE + DB_VECTOR]        = db_interception,
         [SVM_EXIT_EXCP_BASE + BP_VECTOR]        = bp_interception,
         [SVM_EXIT_EXCP_BASE + UD_VECTOR]        = ud_interception,
         [SVM_EXIT_EXCP_BASE + PF_VECTOR]        = pf_interception,
         [SVM_EXIT_EXCP_BASE + NM_VECTOR]        = nm_interception,
         [SVM_EXIT_EXCP_BASE + MC_VECTOR]        = mc_interception,
         [SVM_EXIT_INTR]                         = intr_interception,
         [SVM_EXIT_NMI]                          = nmi_interception,
         [SVM_EXIT_SMI]                          = nop_on_interception,
         [SVM_EXIT_INIT]                         = nop_on_interception,
         [SVM_EXIT_VINTR]                        = interrupt_window_interception,
-        /* [SVM_EXIT_CR0_SEL_WRITE]             = emulate_on_interception, */
         [SVM_EXIT_CPUID]                        = cpuid_interception,
         [SVM_EXIT_IRET]                         = iret_interception,
         [SVM_EXIT_INVD]                         = emulate_on_interception,
@@ -2449,7 +2460,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
         [SVM_EXIT_HLT]                          = halt_interception,
         [SVM_EXIT_INVLPG]                       = invlpg_interception,
         [SVM_EXIT_INVLPGA]                      = invlpga_interception,
         [SVM_EXIT_IOIO]                         = io_interception,
         [SVM_EXIT_MSR]                          = msr_interception,
         [SVM_EXIT_TASK_SWITCH]                  = task_switch_interception,
         [SVM_EXIT_SHUTDOWN]                     = shutdown_interception,
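svm_exit_handlers is a C99 designated-initializer array keyed by SVM exit
code, so exit handling is a bounds-checked table lookup. A compilable
miniature of that dispatch pattern (names and codes are illustrative, not the
real kvm entry points):

        #include <stddef.h>

        #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

        struct vcpu_svm;                        /* stand-in for the real type */
        typedef int (*exit_handler_t)(struct vcpu_svm *svm);

        static int nop_handler(struct vcpu_svm *svm) { (void)svm; return 1; }

        /* Sparse table: unnamed slots default to NULL, like gaps above. */
        static exit_handler_t handlers[] = {
                [0x00] = nop_handler,
                [0x7b] = nop_handler,
        };

        static int dispatch(struct vcpu_svm *svm, unsigned int exit_code)
        {
                if (exit_code >= ARRAY_SIZE(handlers) || !handlers[exit_code])
                        return -1;              /* unknown exit reason */
                return handlers[exit_code](svm);
        }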
@@ -2650,10 +2661,12 @@ static void enable_irq_window(struct kvm_vcpu *vcpu)
 {
         struct vcpu_svm *svm = to_svm(vcpu);
 
-        /* In case GIF=0 we can't rely on the CPU to tell us when
-         * GIF becomes 1, because that's a separate STGI/VMRUN intercept.
-         * The next time we get that intercept, this function will be
-         * called again though and we'll get the vintr intercept. */
+        /*
+         * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes
+         * 1, because that's a separate STGI/VMRUN intercept. The next time we
+         * get that intercept, this function will be called again though and
+         * we'll get the vintr intercept.
+         */
         if (gif_set(svm) && nested_svm_intr(svm)) {
                 svm_set_vintr(svm);
                 svm_inject_irq(svm, 0x0);
@@ -2668,9 +2681,10 @@ static void enable_nmi_window(struct kvm_vcpu *vcpu)
             == HF_NMI_MASK)
                 return; /* IRET will cause a vm exit */
 
-        /* Something prevents NMI from been injected. Single step over
-           possible problem (IRET or exception injection or interrupt
-           shadow) */
+        /*
+         * Something prevents NMI from been injected. Single step over possible
+         * problem (IRET or exception injection or interrupt shadow)
+         */
         svm->nmi_singlestep = true;
         svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
         update_db_intercept(vcpu);
@@ -2978,24 +2992,24 @@ static void svm_cpuid_update(struct kvm_vcpu *vcpu)
 }
 
 static const struct trace_print_flags svm_exit_reasons_str[] = {
         { SVM_EXIT_READ_CR0,                    "read_cr0" },
         { SVM_EXIT_READ_CR3,                    "read_cr3" },
         { SVM_EXIT_READ_CR4,                    "read_cr4" },
         { SVM_EXIT_READ_CR8,                    "read_cr8" },
         { SVM_EXIT_WRITE_CR0,                   "write_cr0" },
         { SVM_EXIT_WRITE_CR3,                   "write_cr3" },
         { SVM_EXIT_WRITE_CR4,                   "write_cr4" },
         { SVM_EXIT_WRITE_CR8,                   "write_cr8" },
         { SVM_EXIT_READ_DR0,                    "read_dr0" },
         { SVM_EXIT_READ_DR1,                    "read_dr1" },
         { SVM_EXIT_READ_DR2,                    "read_dr2" },
         { SVM_EXIT_READ_DR3,                    "read_dr3" },
         { SVM_EXIT_WRITE_DR0,                   "write_dr0" },
         { SVM_EXIT_WRITE_DR1,                   "write_dr1" },
         { SVM_EXIT_WRITE_DR2,                   "write_dr2" },
         { SVM_EXIT_WRITE_DR3,                   "write_dr3" },
         { SVM_EXIT_WRITE_DR5,                   "write_dr5" },
         { SVM_EXIT_WRITE_DR7,                   "write_dr7" },
         { SVM_EXIT_EXCP_BASE + DB_VECTOR,       "DB excp" },
         { SVM_EXIT_EXCP_BASE + BP_VECTOR,       "BP excp" },
         { SVM_EXIT_EXCP_BASE + UD_VECTOR,       "UD excp" },