author		Avi Kivity <avi@qumranet.com>	2008-02-24 04:20:43 -0500
committer	Avi Kivity <avi@qumranet.com>	2008-04-27 04:53:26 -0400
commit		2d3ad1f40c841bd3e97d30d423eea53915d085dc (patch)
tree		39f4a5a7814cc306d002366e1af922d32b7713c5
parent		05da45583de9b383dc81dd695fe248431d6c9f2b (diff)
KVM: Prefix control register accessors with kvm_ to avoid namespace pollution
Names like 'set_cr3()' look dangerously close to affecting the host.
Signed-off-by: Avi Kivity <avi@qumranet.com>
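
The hazard is one of reading rather than linking: these accessors update guest state held in vcpu->arch, while the host kernel has similarly named helpers that reprogram the physical CPU. A minimal sketch of the distinction (illustrative only, not part of this patch; write_cr3() is the host-side arch/x86 helper of this era, and root_gpa is an assumed variable):

	/* Illustration only -- not from this patch. */
	write_cr3(__pa(swapper_pg_dir));	/* host helper: reloads the running
						 * CPU's CR3, flushing the host TLB */
	kvm_set_cr3(vcpu, root_gpa);		/* KVM accessor: checks the value and
						 * records it in vcpu->arch.cr3 for the
						 * guest; host CR3 is never touched */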
-rw-r--r--	arch/x86/kvm/vmx.c		| 14
-rw-r--r--	arch/x86/kvm/x86.c		| 46
-rw-r--r--	include/asm-x86/kvm_host.h	| 12
3 files changed, 36 insertions(+), 36 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index f46ad03c2521..50345032974d 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1683,7 +1683,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 	vmx->vcpu.arch.rmode.active = 0;
 
 	vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
-	set_cr8(&vmx->vcpu, 0);
+	kvm_set_cr8(&vmx->vcpu, 0);
 	msr = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
 	if (vmx->vcpu.vcpu_id == 0)
 		msr |= MSR_IA32_APICBASE_BSP;
@@ -2026,22 +2026,22 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		switch (cr) {
 		case 0:
 			vcpu_load_rsp_rip(vcpu);
-			set_cr0(vcpu, vcpu->arch.regs[reg]);
+			kvm_set_cr0(vcpu, vcpu->arch.regs[reg]);
 			skip_emulated_instruction(vcpu);
 			return 1;
 		case 3:
 			vcpu_load_rsp_rip(vcpu);
-			set_cr3(vcpu, vcpu->arch.regs[reg]);
+			kvm_set_cr3(vcpu, vcpu->arch.regs[reg]);
 			skip_emulated_instruction(vcpu);
 			return 1;
 		case 4:
 			vcpu_load_rsp_rip(vcpu);
-			set_cr4(vcpu, vcpu->arch.regs[reg]);
+			kvm_set_cr4(vcpu, vcpu->arch.regs[reg]);
 			skip_emulated_instruction(vcpu);
 			return 1;
 		case 8:
 			vcpu_load_rsp_rip(vcpu);
-			set_cr8(vcpu, vcpu->arch.regs[reg]);
+			kvm_set_cr8(vcpu, vcpu->arch.regs[reg]);
 			skip_emulated_instruction(vcpu);
 			if (irqchip_in_kernel(vcpu->kvm))
 				return 1;
@@ -2067,14 +2067,14 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 			return 1;
 		case 8:
 			vcpu_load_rsp_rip(vcpu);
-			vcpu->arch.regs[reg] = get_cr8(vcpu);
+			vcpu->arch.regs[reg] = kvm_get_cr8(vcpu);
 			vcpu_put_rsp_rip(vcpu);
 			skip_emulated_instruction(vcpu);
 			return 1;
 		}
 		break;
 	case 3: /* lmsw */
-		lmsw(vcpu, (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f);
+		kvm_lmsw(vcpu, (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f);
 
 		skip_emulated_instruction(vcpu);
 		return 1;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 0458bd516185..dbcff38dfcc3 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -237,7 +237,7 @@ out:
 	return changed;
 }
 
-void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
+void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
 	if (cr0 & CR0_RESERVED_BITS) {
 		printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
@@ -295,15 +295,15 @@ void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	kvm_mmu_reset_context(vcpu);
 	return;
 }
-EXPORT_SYMBOL_GPL(set_cr0);
+EXPORT_SYMBOL_GPL(kvm_set_cr0);
 
-void lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
+void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
 {
-	set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f));
+	kvm_set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f));
 }
-EXPORT_SYMBOL_GPL(lmsw);
+EXPORT_SYMBOL_GPL(kvm_lmsw);
 
-void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
 	if (cr4 & CR4_RESERVED_BITS) {
 		printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
@@ -334,9 +334,9 @@ void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	vcpu->arch.cr4 = cr4;
 	kvm_mmu_reset_context(vcpu);
 }
-EXPORT_SYMBOL_GPL(set_cr4);
+EXPORT_SYMBOL_GPL(kvm_set_cr4);
 
-void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
+void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 {
 	if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) {
 		kvm_mmu_flush_tlb(vcpu);
@@ -388,9 +388,9 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 	}
 	up_read(&vcpu->kvm->slots_lock);
 }
-EXPORT_SYMBOL_GPL(set_cr3);
+EXPORT_SYMBOL_GPL(kvm_set_cr3);
 
-void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
+void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
 {
 	if (cr8 & CR8_RESERVED_BITS) {
 		printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
@@ -402,16 +402,16 @@ void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
 	else
 		vcpu->arch.cr8 = cr8;
 }
-EXPORT_SYMBOL_GPL(set_cr8);
+EXPORT_SYMBOL_GPL(kvm_set_cr8);
 
-unsigned long get_cr8(struct kvm_vcpu *vcpu)
+unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
 {
 	if (irqchip_in_kernel(vcpu->kvm))
 		return kvm_lapic_get_cr8(vcpu);
 	else
 		return vcpu->arch.cr8;
 }
-EXPORT_SYMBOL_GPL(get_cr8);
+EXPORT_SYMBOL_GPL(kvm_get_cr8);
 
 /*
  * List of msr numbers which we expose to userspace through KVM_GET_MSRS
@@ -2462,7 +2462,7 @@ void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
 void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
 		   unsigned long *rflags)
 {
-	lmsw(vcpu, msw);
+	kvm_lmsw(vcpu, msw);
 	*rflags = kvm_x86_ops->get_rflags(vcpu);
 }
 
@@ -2479,7 +2479,7 @@ unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
 	case 4:
 		return vcpu->arch.cr4;
 	case 8:
-		return get_cr8(vcpu);
+		return kvm_get_cr8(vcpu);
 	default:
 		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
 		return 0;
@@ -2491,20 +2491,20 @@ void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
 {
 	switch (cr) {
 	case 0:
-		set_cr0(vcpu, mk_cr_64(vcpu->arch.cr0, val));
+		kvm_set_cr0(vcpu, mk_cr_64(vcpu->arch.cr0, val));
 		*rflags = kvm_x86_ops->get_rflags(vcpu);
 		break;
 	case 2:
 		vcpu->arch.cr2 = val;
 		break;
 	case 3:
-		set_cr3(vcpu, val);
+		kvm_set_cr3(vcpu, val);
 		break;
 	case 4:
-		set_cr4(vcpu, mk_cr_64(vcpu->arch.cr4, val));
+		kvm_set_cr4(vcpu, mk_cr_64(vcpu->arch.cr4, val));
 		break;
 	case 8:
-		set_cr8(vcpu, val & 0xfUL);
+		kvm_set_cr8(vcpu, val & 0xfUL);
 		break;
 	default:
 		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
@@ -2602,7 +2602,7 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu,
 			      struct kvm_run *kvm_run)
 {
 	kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
-	kvm_run->cr8 = get_cr8(vcpu);
+	kvm_run->cr8 = kvm_get_cr8(vcpu);
 	kvm_run->apic_base = kvm_get_apic_base(vcpu);
 	if (irqchip_in_kernel(vcpu->kvm))
 		kvm_run->ready_for_interrupt_injection = 1;
@@ -2803,7 +2803,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 	/* re-sync apic's tpr */
 	if (!irqchip_in_kernel(vcpu->kvm))
-		set_cr8(vcpu, kvm_run->cr8);
+		kvm_set_cr8(vcpu, kvm_run->cr8);
 
 	if (vcpu->arch.pio.cur_count) {
 		r = complete_pio(vcpu);
@@ -2961,7 +2961,7 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 	sregs->cr2 = vcpu->arch.cr2;
 	sregs->cr3 = vcpu->arch.cr3;
 	sregs->cr4 = vcpu->arch.cr4;
-	sregs->cr8 = get_cr8(vcpu);
+	sregs->cr8 = kvm_get_cr8(vcpu);
 	sregs->efer = vcpu->arch.shadow_efer;
 	sregs->apic_base = kvm_get_apic_base(vcpu);
 
@@ -3007,7 +3007,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 	mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3;
 	vcpu->arch.cr3 = sregs->cr3;
 
-	set_cr8(vcpu, sregs->cr8);
+	kvm_set_cr8(vcpu, sregs->cr8);
 
 	mmu_reset_needed |= vcpu->arch.shadow_efer != sregs->efer;
 	kvm_x86_ops->set_efer(vcpu, sregs->efer);
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index 95473ef5a906..49ced21e0290 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -470,12 +470,12 @@ int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
 int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
 		    unsigned long value);
 
-void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
-void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr0);
-void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr0);
-void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr0);
-unsigned long get_cr8(struct kvm_vcpu *vcpu);
-void lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
+void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
+void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr0);
+void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr0);
+void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr0);
+unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
+void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
 void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
 
 int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
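
For code outside this tree that used the old names, the conversion is purely mechanical; a hypothetical caller of the old interface (sketch, not from this patch) would be updated as:

	/* before this commit */
	set_cr8(vcpu, 0);
	tpr = get_cr8(vcpu);

	/* after this commit */
	kvm_set_cr8(vcpu, 0);
	tpr = kvm_get_cr8(vcpu);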