| Field | Value | Date |
|---|---|---|
| author | Carsten Otte <cotte@de.ibm.com> | 2007-10-30 13:44:17 -0400 |
| committer | Avi Kivity <avi@qumranet.com> | 2008-01-30 10:52:58 -0500 |
| commit | 15c4a6406f6c40632260861e1db7c539e79dcf1a (patch) | |
| tree | 1de5718d9bbdda7c2c14c757967de6b2b1587c07 /drivers/kvm/x86.c | |
| parent | aab61cc0d28f6fab0c2c9137d95dea54c7dbcf46 (diff) | |
KVM: Portability: Move kvm_get/set_msr[_common] to x86.c
This patch moves the implementation of kvm_get/set_msr, kvm_get/set_msr_common, and set_efer from kvm_main.c to x86.c. The definition of EFER_RESERVED_BITS is moved as well.
Signed-off-by: Carsten Otte <cotte@de.ibm.com>
Acked-by: Hollis Blanchard <hollisb@us.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'drivers/kvm/x86.c'):

| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | drivers/kvm/x86.c | 134 |

1 file changed, 134 insertions, 0 deletions
```diff
diff --git a/drivers/kvm/x86.c b/drivers/kvm/x86.c
index a728af8a83e5..786274347512 100644
--- a/drivers/kvm/x86.c
+++ b/drivers/kvm/x86.c
@@ -38,6 +38,7 @@
 			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))
 
 #define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
+#define EFER_RESERVED_BITS 0xfffffffffffff2fe
 
 unsigned long segment_base(u16 selector)
 {
@@ -324,6 +325,44 @@ static u32 emulated_msrs[] = {
 	MSR_IA32_MISC_ENABLE,
 };
 
+#ifdef CONFIG_X86_64
+
+static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
+{
+	if (efer & EFER_RESERVED_BITS) {
+		printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
+		       efer);
+		inject_gp(vcpu);
+		return;
+	}
+
+	if (is_paging(vcpu)
+	    && (vcpu->shadow_efer & EFER_LME) != (efer & EFER_LME)) {
+		printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
+		inject_gp(vcpu);
+		return;
+	}
+
+	kvm_x86_ops->set_efer(vcpu, efer);
+
+	efer &= ~EFER_LMA;
+	efer |= vcpu->shadow_efer & EFER_LMA;
+
+	vcpu->shadow_efer = efer;
+}
+
+#endif
+
+/*
+ * Writes msr value into into the appropriate "register".
+ * Returns 0 on success, non-0 otherwise.
+ * Assumes vcpu_load() was already called.
+ */
+int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
+{
+	return kvm_x86_ops->set_msr(vcpu, msr_index, data);
+}
+
 /*
  * Adapt set_msr() to msr_io()'s calling convention
  */
@@ -332,6 +371,101 @@ static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
 	return kvm_set_msr(vcpu, index, *data);
 }
 
+
+int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+{
+	switch (msr) {
+#ifdef CONFIG_X86_64
+	case MSR_EFER:
+		set_efer(vcpu, data);
+		break;
+#endif
+	case MSR_IA32_MC0_STATUS:
+		pr_unimpl(vcpu, "%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n",
+			__FUNCTION__, data);
+		break;
+	case MSR_IA32_MCG_STATUS:
+		pr_unimpl(vcpu, "%s: MSR_IA32_MCG_STATUS 0x%llx, nop\n",
+			__FUNCTION__, data);
+		break;
+	case MSR_IA32_UCODE_REV:
+	case MSR_IA32_UCODE_WRITE:
+	case 0x200 ... 0x2ff: /* MTRRs */
+		break;
+	case MSR_IA32_APICBASE:
+		kvm_set_apic_base(vcpu, data);
+		break;
+	case MSR_IA32_MISC_ENABLE:
+		vcpu->ia32_misc_enable_msr = data;
+		break;
+	default:
+		pr_unimpl(vcpu, "unhandled wrmsr: 0x%x\n", msr);
+		return 1;
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_set_msr_common);
+
+
+/*
+ * Reads an msr value (of 'msr_index') into 'pdata'.
+ * Returns 0 on success, non-0 otherwise.
+ * Assumes vcpu_load() was already called.
+ */
+int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
+{
+	return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
+}
+
+int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
+{
+	u64 data;
+
+	switch (msr) {
+	case 0xc0010010: /* SYSCFG */
+	case 0xc0010015: /* HWCR */
+	case MSR_IA32_PLATFORM_ID:
+	case MSR_IA32_P5_MC_ADDR:
+	case MSR_IA32_P5_MC_TYPE:
+	case MSR_IA32_MC0_CTL:
+	case MSR_IA32_MCG_STATUS:
+	case MSR_IA32_MCG_CAP:
+	case MSR_IA32_MC0_MISC:
+	case MSR_IA32_MC0_MISC+4:
+	case MSR_IA32_MC0_MISC+8:
+	case MSR_IA32_MC0_MISC+12:
+	case MSR_IA32_MC0_MISC+16:
+	case MSR_IA32_UCODE_REV:
+	case MSR_IA32_PERF_STATUS:
+	case MSR_IA32_EBL_CR_POWERON:
+		/* MTRR registers */
+	case 0xfe:
+	case 0x200 ... 0x2ff:
+		data = 0;
+		break;
+	case 0xcd: /* fsb frequency */
+		data = 3;
+		break;
+	case MSR_IA32_APICBASE:
+		data = kvm_get_apic_base(vcpu);
+		break;
+	case MSR_IA32_MISC_ENABLE:
+		data = vcpu->ia32_misc_enable_msr;
+		break;
+#ifdef CONFIG_X86_64
+	case MSR_EFER:
+		data = vcpu->shadow_efer;
+		break;
+#endif
+	default:
+		pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
+		return 1;
+	}
+	*pdata = data;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_get_msr_common);
+
 /*
  * Read or write a bunch of msrs. All parameters are kernel addresses.
  *
```
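
The moved helpers deliberately cover only architecture-neutral MSRs and are exported with EXPORT_SYMBOL_GPL because hardware-specific handling is still reached through kvm_x86_ops->get_msr/set_msr. A minimal sketch of how a backend handler is expected to layer on top of kvm_set_msr_common(); the function name and the MSR it claims below are hypothetical illustrations, not taken from this patch:

```c
/*
 * Hypothetical backend wrmsr handler (the name example_backend_set_msr
 * and the handled MSR are illustrative only): handle the MSRs the
 * backend owns, then fall back to the common code in x86.c.
 */
static int example_backend_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	switch (msr) {
	case MSR_IA32_SYSENTER_CS:
		/* backend-specific bookkeeping would go here */
		return 0;
	default:
		/* architecture-neutral MSRs are handled by the common code */
		return kvm_set_msr_common(vcpu, msr, data);
	}
}
```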
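
For reference, ~EFER_RESERVED_BITS = ~0xfffffffffffff2fe = 0xd01, so the only bits that pass the reserved-bits check in set_efer() are bits 0, 8, 10 and 11, i.e. the architectural EFER flags SCE, LME, LMA and NXE. A small stand-alone check of that arithmetic (plain user-space C, independent of the kernel sources):

```c
#include <stdint.h>
#include <stdio.h>

/* Same mask the patch introduces in drivers/kvm/x86.c. */
#define EFER_RESERVED_BITS 0xfffffffffffff2feULL

int main(void)
{
	uint64_t writable = ~EFER_RESERVED_BITS;

	/* Prints 0xd01: bits 0 (SCE), 8 (LME), 10 (LMA), 11 (NXE). */
	printf("non-reserved EFER bits: %#llx\n",
	       (unsigned long long)writable);

	/* Any other bit, e.g. bit 12, trips the #GP path in set_efer(). */
	printf("bit 12 reserved: %s\n",
	       (EFER_RESERVED_BITS & (1ULL << 12)) ? "yes" : "no");
	return 0;
}
```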