author     Carsten Otte <cotte@de.ibm.com>    2007-10-11 13:16:52 -0400
committer  Avi Kivity <avi@qumranet.com>      2008-01-30 10:52:52 -0500
commit     313a3dc75da20630e549441932a7654223f8d72a (patch)
tree       96af61f06c198834d49c886476ed30b418d853c0 /drivers/kvm/kvm_main.c
parent     c4fcc2724628c6548748ec80a90b548fc300e81f (diff)
KVM: Portability: split kvm_vcpu_ioctl
This patch splits kvm_vcpu_ioctl into architecture-independent parts and
x86-specific parts, which go to kvm_arch_vcpu_ioctl in x86.c.
Common ioctls for all architectures are:
KVM_RUN, KVM_GET/SET_(S-)REGS, KVM_TRANSLATE, KVM_INTERRUPT,
KVM_DEBUG_GUEST, KVM_SET_SIGNAL_MASK, KVM_GET/SET_FPU
Note that some PPC chips don't have an FPU, so we might need an #ifdef
around KVM_GET/SET_FPU one day.
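For illustration only, such a guard around the common FPU cases might look
like the sketch below; KVM_ARCH_HAS_FPU is a hypothetical symbol that an
architecture would define, not something this patch introduces:

#ifdef KVM_ARCH_HAS_FPU         /* hypothetical per-arch guard, not in this patch */
        case KVM_GET_FPU:
                /* ... existing KVM_GET_FPU handling ... */
                break;
        case KVM_SET_FPU:
                /* ... existing KVM_SET_FPU handling ... */
                break;
#endif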
x86 specific ioctls are:
KVM_GET/SET_LAPIC, KVM_SET_CPUID, KVM_GET/SET_MSRS
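These end up in a switch of their own; a rough sketch of the
kvm_arch_vcpu_ioctl shape in x86.c follows (x86.c is outside the kvm_main.c
diff shown below, the case bodies are elided, and the -EINVAL default is an
assumption):

/* x86.c -- sketch only */
long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        long r = -EINVAL;               /* assumption: anything unhandled fails */

        switch (ioctl) {
        case KVM_GET_LAPIC:
        case KVM_SET_LAPIC:
        case KVM_SET_CPUID:
        case KVM_GET_MSRS:
        case KVM_SET_MSRS:
                /* ... handlers moved over from kvm_main.c (bodies elided),
                 *     all operating on vcpu ... */
                break;
        }
        return r;
}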
An interesting aspect is vcpu_load/vcpu_put. We now have a common
vcpu_load/put which does the preemption handling, and an
architecture-specific kvm_arch_vcpu_load/put. In the x86 case, the latter
calls the vmx/svm function defined in kvm_x86_ops.
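Concretely, the x86 hooks are expected to be thin wrappers around the
existing kvm_x86_ops entries, roughly of this shape (a sketch; x86.c itself
is not part of the kvm_main.c diff shown below):

/* x86.c -- sketch of the arch hooks referenced above */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        kvm_x86_ops->vcpu_load(vcpu, cpu);      /* vmx or svm implementation */
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        kvm_x86_ops->vcpu_put(vcpu);
}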
Signed-off-by: Carsten Otte <cotte@de.ibm.com>
Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com>
Reviewed-by: Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'drivers/kvm/kvm_main.c')
-rw-r--r--  drivers/kvm/kvm_main.c  |  200
1 file changed, 6 insertions, 194 deletions
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index ec696887b222..5fd2864b7811 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -90,8 +90,6 @@ static struct kvm_stats_debugfs_item {
 
 static struct dentry *debugfs_dir;
 
-#define MAX_IO_MSRS 256
-
 #define CR0_RESERVED_BITS \
         (~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
                           | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
@@ -179,21 +177,21 @@ EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
 /*
  * Switches to specified vcpu, until a matching vcpu_put()
  */
-static void vcpu_load(struct kvm_vcpu *vcpu)
+void vcpu_load(struct kvm_vcpu *vcpu)
 {
         int cpu;
 
         mutex_lock(&vcpu->mutex);
         cpu = get_cpu();
         preempt_notifier_register(&vcpu->preempt_notifier);
-        kvm_x86_ops->vcpu_load(vcpu, cpu);
+        kvm_arch_vcpu_load(vcpu, cpu);
         put_cpu();
 }
 
-static void vcpu_put(struct kvm_vcpu *vcpu)
+void vcpu_put(struct kvm_vcpu *vcpu)
 {
         preempt_disable();
-        kvm_x86_ops->vcpu_put(vcpu);
+        kvm_arch_vcpu_put(vcpu);
         preempt_notifier_unregister(&vcpu->preempt_notifier);
         preempt_enable();
         mutex_unlock(&vcpu->mutex);
@@ -2509,86 +2507,6 @@ void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
 EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
 
 /*
- * Adapt set_msr() to msr_io()'s calling convention
- */
-static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
-{
-        return kvm_set_msr(vcpu, index, *data);
-}
-
-/*
- * Read or write a bunch of msrs. All parameters are kernel addresses.
- *
- * @return number of msrs set successfully.
- */
-static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
-                    struct kvm_msr_entry *entries,
-                    int (*do_msr)(struct kvm_vcpu *vcpu,
-                                  unsigned index, u64 *data))
-{
-        int i;
-
-        vcpu_load(vcpu);
-
-        for (i = 0; i < msrs->nmsrs; ++i)
-                if (do_msr(vcpu, entries[i].index, &entries[i].data))
-                        break;
-
-        vcpu_put(vcpu);
-
-        return i;
-}
-
-/*
- * Read or write a bunch of msrs. Parameters are user addresses.
- *
- * @return number of msrs set successfully.
- */
-static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
-                  int (*do_msr)(struct kvm_vcpu *vcpu,
-                                unsigned index, u64 *data),
-                  int writeback)
-{
-        struct kvm_msrs msrs;
-        struct kvm_msr_entry *entries;
-        int r, n;
-        unsigned size;
-
-        r = -EFAULT;
-        if (copy_from_user(&msrs, user_msrs, sizeof msrs))
-                goto out;
-
-        r = -E2BIG;
-        if (msrs.nmsrs >= MAX_IO_MSRS)
-                goto out;
-
-        r = -ENOMEM;
-        size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
-        entries = vmalloc(size);
-        if (!entries)
-                goto out;
-
-        r = -EFAULT;
-        if (copy_from_user(entries, user_msrs->entries, size))
-                goto out_free;
-
-        r = n = __msr_io(vcpu, &msrs, entries, do_msr);
-        if (r < 0)
-                goto out_free;
-
-        r = -EFAULT;
-        if (writeback && copy_to_user(user_msrs->entries, entries, size))
-                goto out_free;
-
-        r = n;
-
-out_free:
-        vfree(entries);
-out:
-        return r;
-}
-
-/*
  * Translate a guest virtual address to a guest physical address.
  */
 static int kvm_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
@@ -2761,48 +2679,6 @@ free_vcpu:
         return r;
 }
 
-static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
-{
-        u64 efer;
-        int i;
-        struct kvm_cpuid_entry *e, *entry;
-
-        rdmsrl(MSR_EFER, efer);
-        entry = NULL;
-        for (i = 0; i < vcpu->cpuid_nent; ++i) {
-                e = &vcpu->cpuid_entries[i];
-                if (e->function == 0x80000001) {
-                        entry = e;
-                        break;
-                }
-        }
-        if (entry && (entry->edx & (1 << 20)) && !(efer & EFER_NX)) {
-                entry->edx &= ~(1 << 20);
-                printk(KERN_INFO "kvm: guest NX capability removed\n");
-        }
-}
-
-static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
-                                    struct kvm_cpuid *cpuid,
-                                    struct kvm_cpuid_entry __user *entries)
-{
-        int r;
-
-        r = -E2BIG;
-        if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
-                goto out;
-        r = -EFAULT;
-        if (copy_from_user(&vcpu->cpuid_entries, entries,
-                           cpuid->nent * sizeof(struct kvm_cpuid_entry)))
-                goto out;
-        vcpu->cpuid_nent = cpuid->nent;
-        cpuid_fix_nx_cap(vcpu);
-        return 0;
-
-out:
-        return r;
-}
-
 static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
 {
         if (sigset) {
@@ -2875,33 +2751,12 @@ static int kvm_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
         return 0;
 }
 
-static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
-                                    struct kvm_lapic_state *s)
-{
-        vcpu_load(vcpu);
-        memcpy(s->regs, vcpu->apic->regs, sizeof *s);
-        vcpu_put(vcpu);
-
-        return 0;
-}
-
-static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
-                                    struct kvm_lapic_state *s)
-{
-        vcpu_load(vcpu);
-        memcpy(vcpu->apic->regs, s->regs, sizeof *s);
-        kvm_apic_post_state_restore(vcpu);
-        vcpu_put(vcpu);
-
-        return 0;
-}
-
 static long kvm_vcpu_ioctl(struct file *filp,
                            unsigned int ioctl, unsigned long arg)
 {
         struct kvm_vcpu *vcpu = filp->private_data;
         void __user *argp = (void __user *)arg;
-        int r = -EINVAL;
+        int r;
 
         switch (ioctl) {
         case KVM_RUN:
@@ -2999,24 +2854,6 @@ static long kvm_vcpu_ioctl(struct file *filp,
                 r = 0;
                 break;
         }
-        case KVM_GET_MSRS:
-                r = msr_io(vcpu, argp, kvm_get_msr, 1);
-                break;
-        case KVM_SET_MSRS:
-                r = msr_io(vcpu, argp, do_set_msr, 0);
-                break;
-        case KVM_SET_CPUID: {
-                struct kvm_cpuid __user *cpuid_arg = argp;
-                struct kvm_cpuid cpuid;
-
-                r = -EFAULT;
-                if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
-                        goto out;
-                r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
-                if (r)
-                        goto out;
-                break;
-        }
         case KVM_SET_SIGNAL_MASK: {
                 struct kvm_signal_mask __user *sigmask_arg = argp;
                 struct kvm_signal_mask kvm_sigmask;
@@ -3065,33 +2902,8 @@ static long kvm_vcpu_ioctl(struct file *filp,
                 r = 0;
                 break;
         }
-        case KVM_GET_LAPIC: {
-                struct kvm_lapic_state lapic;
-
-                memset(&lapic, 0, sizeof lapic);
-                r = kvm_vcpu_ioctl_get_lapic(vcpu, &lapic);
-                if (r)
-                        goto out;
-                r = -EFAULT;
-                if (copy_to_user(argp, &lapic, sizeof lapic))
-                        goto out;
-                r = 0;
-                break;
-        }
-        case KVM_SET_LAPIC: {
-                struct kvm_lapic_state lapic;
-
-                r = -EFAULT;
-                if (copy_from_user(&lapic, argp, sizeof lapic))
-                        goto out;
-                r = kvm_vcpu_ioctl_set_lapic(vcpu, &lapic);;
-                if (r)
-                        goto out;
-                r = 0;
-                break;
-        }
         default:
-                ;
+                r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
         }
 out:
         return r;