author     Carsten Otte <cotte@de.ibm.com>   2007-10-11 13:16:52 -0400
committer  Avi Kivity <avi@qumranet.com>     2008-01-30 10:52:52 -0500
commit     313a3dc75da20630e549441932a7654223f8d72a (patch)
tree       96af61f06c198834d49c886476ed30b418d853c0 /drivers/kvm
parent     c4fcc2724628c6548748ec80a90b548fc300e81f (diff)
KVM: Portability: split kvm_vcpu_ioctl
This patch splits kvm_vcpu_ioctl into architecture-independent parts, and x86-specific parts which go to kvm_arch_vcpu_ioctl in x86.c.

Common ioctls for all architectures are:
KVM_RUN, KVM_GET/SET_(S-)REGS, KVM_TRANSLATE, KVM_INTERRUPT, KVM_DEBUG_GUEST, KVM_SET_SIGNAL_MASK, KVM_GET/SET_FPU
Note that some PPC chips don't have an FPU, so we might need an #ifdef around KVM_GET/SET_FPU one day.

x86-specific ioctls are:
KVM_GET/SET_LAPIC, KVM_SET_CPUID, KVM_GET/SET_MSRS

An interesting aspect is vcpu_load/vcpu_put. We now have a common vcpu_load/put which does the preemption stuff, and an architecture-specific kvm_arch_vcpu_load/put. In the x86 case, this one calls the vmx/svm function defined in kvm_x86_ops.

Signed-off-by: Carsten Otte <cotte@de.ibm.com>
Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com>
Reviewed-by: Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
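The layering described above can be summarised in a small self-contained sketch. This is plain userspace C, not kernel code: struct vcpu, the pthread mutex, and the ioctl numbers below are illustrative stand-ins for struct kvm_vcpu, the vcpu mutex and the real KVM_* ioctls. The point it shows is the split the diff below performs for x86: the common layer owns locking and dispatch, the arch layer only supplies hooks.

/*
 * Illustrative sketch of the common/arch split (stub types, not kernel code).
 * Mirrors vcpu_load()/vcpu_put() calling kvm_arch_vcpu_load()/kvm_arch_vcpu_put()
 * and kvm_vcpu_ioctl() falling through to kvm_arch_vcpu_ioctl().
 */
#include <pthread.h>
#include <stdio.h>

struct vcpu {                           /* stand-in for struct kvm_vcpu */
	pthread_mutex_t mutex;
	int cpu;
};

/* arch hooks: on x86 these would just forward to kvm_x86_ops */
static void arch_vcpu_load(struct vcpu *v, int cpu) { v->cpu = cpu; }
static void arch_vcpu_put(struct vcpu *v)           { v->cpu = -1; }

/* common layer: take the lock, then hand off to the arch hook */
static void vcpu_load(struct vcpu *v, int cpu)
{
	pthread_mutex_lock(&v->mutex);
	arch_vcpu_load(v, cpu);
}

static void vcpu_put(struct vcpu *v)
{
	arch_vcpu_put(v);
	pthread_mutex_unlock(&v->mutex);
}

enum { IOCTL_RUN, IOCTL_SET_CPUID };    /* hypothetical ioctl numbers */

/* arch dispatcher: handles arch-only commands, rejects the rest */
static long arch_vcpu_ioctl(struct vcpu *v, unsigned int ioctl)
{
	switch (ioctl) {
	case IOCTL_SET_CPUID:
		return 0;
	default:
		return -1;              /* -EINVAL in the kernel */
	}
}

/* common dispatcher: handles shared commands, defers unknown ones to the arch */
static long vcpu_ioctl(struct vcpu *v, unsigned int ioctl)
{
	switch (ioctl) {
	case IOCTL_RUN:
		return 0;
	default:
		return arch_vcpu_ioctl(v, ioctl);
	}
}

int main(void)
{
	struct vcpu v = { .mutex = PTHREAD_MUTEX_INITIALIZER, .cpu = -1 };

	vcpu_load(&v, 0);
	printf("RUN -> %ld, SET_CPUID -> %ld\n",
	       vcpu_ioctl(&v, IOCTL_RUN), vcpu_ioctl(&v, IOCTL_SET_CPUID));
	vcpu_put(&v);
	return 0;
}

On x86 the arch hooks simply forward to kvm_x86_ops, so the vmx/svm implementations remain the final destination, as the patch's kvm_arch_vcpu_load/put show.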
Diffstat (limited to 'drivers/kvm')
-rw-r--r--  drivers/kvm/kvm.h          9
-rw-r--r--  drivers/kvm/kvm_main.c   200
-rw-r--r--  drivers/kvm/x86.c        219
3 files changed, 234 insertions(+), 194 deletions(-)
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index d56962d49aa6..1edf8a5e365e 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -537,6 +537,10 @@ extern struct kvm_x86_ops *kvm_x86_ops;
 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
 void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
 
+void vcpu_load(struct kvm_vcpu *vcpu);
+void vcpu_put(struct kvm_vcpu *vcpu);
+
+
 int kvm_init_x86(struct kvm_x86_ops *ops, unsigned int vcpu_size,
 		  struct module *module);
 void kvm_exit_x86(void);
@@ -655,6 +659,11 @@ int kvm_fix_hypercall(struct kvm_vcpu *vcpu);
 
 long kvm_arch_dev_ioctl(struct file *filp,
 			unsigned int ioctl, unsigned long arg);
+long kvm_arch_vcpu_ioctl(struct file *filp,
+			 unsigned int ioctl, unsigned long arg);
+void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
+void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
+
 __init void kvm_arch_init(void);
 
 static inline void kvm_guest_enter(void)
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index ec696887b222..5fd2864b7811 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -90,8 +90,6 @@ static struct kvm_stats_debugfs_item {
 
 static struct dentry *debugfs_dir;
 
-#define MAX_IO_MSRS 256
-
 #define CR0_RESERVED_BITS \
 	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
 			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
@@ -179,21 +177,21 @@ EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
 /*
  * Switches to specified vcpu, until a matching vcpu_put()
  */
-static void vcpu_load(struct kvm_vcpu *vcpu)
+void vcpu_load(struct kvm_vcpu *vcpu)
 {
 	int cpu;
 
 	mutex_lock(&vcpu->mutex);
 	cpu = get_cpu();
 	preempt_notifier_register(&vcpu->preempt_notifier);
-	kvm_x86_ops->vcpu_load(vcpu, cpu);
+	kvm_arch_vcpu_load(vcpu, cpu);
 	put_cpu();
 }
 
-static void vcpu_put(struct kvm_vcpu *vcpu)
+void vcpu_put(struct kvm_vcpu *vcpu)
 {
 	preempt_disable();
-	kvm_x86_ops->vcpu_put(vcpu);
+	kvm_arch_vcpu_put(vcpu);
 	preempt_notifier_unregister(&vcpu->preempt_notifier);
 	preempt_enable();
 	mutex_unlock(&vcpu->mutex);
@@ -2509,86 +2507,6 @@ void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
 EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
 
 /*
- * Adapt set_msr() to msr_io()'s calling convention
- */
-static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
-{
-	return kvm_set_msr(vcpu, index, *data);
-}
-
-/*
- * Read or write a bunch of msrs. All parameters are kernel addresses.
- *
- * @return number of msrs set successfully.
- */
-static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
-		    struct kvm_msr_entry *entries,
-		    int (*do_msr)(struct kvm_vcpu *vcpu,
-				  unsigned index, u64 *data))
-{
-	int i;
-
-	vcpu_load(vcpu);
-
-	for (i = 0; i < msrs->nmsrs; ++i)
-		if (do_msr(vcpu, entries[i].index, &entries[i].data))
-			break;
-
-	vcpu_put(vcpu);
-
-	return i;
-}
-
-/*
- * Read or write a bunch of msrs. Parameters are user addresses.
- *
- * @return number of msrs set successfully.
- */
-static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
-		  int (*do_msr)(struct kvm_vcpu *vcpu,
-				unsigned index, u64 *data),
-		  int writeback)
-{
-	struct kvm_msrs msrs;
-	struct kvm_msr_entry *entries;
-	int r, n;
-	unsigned size;
-
-	r = -EFAULT;
-	if (copy_from_user(&msrs, user_msrs, sizeof msrs))
-		goto out;
-
-	r = -E2BIG;
-	if (msrs.nmsrs >= MAX_IO_MSRS)
-		goto out;
-
-	r = -ENOMEM;
-	size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
-	entries = vmalloc(size);
-	if (!entries)
-		goto out;
-
-	r = -EFAULT;
-	if (copy_from_user(entries, user_msrs->entries, size))
-		goto out_free;
-
-	r = n = __msr_io(vcpu, &msrs, entries, do_msr);
-	if (r < 0)
-		goto out_free;
-
-	r = -EFAULT;
-	if (writeback && copy_to_user(user_msrs->entries, entries, size))
-		goto out_free;
-
-	r = n;
-
-out_free:
-	vfree(entries);
-out:
-	return r;
-}
-
-/*
  * Translate a guest virtual address to a guest physical address.
  */
 static int kvm_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
@@ -2761,48 +2679,6 @@ free_vcpu:
 	return r;
 }
 
-static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
-{
-	u64 efer;
-	int i;
-	struct kvm_cpuid_entry *e, *entry;
-
-	rdmsrl(MSR_EFER, efer);
-	entry = NULL;
-	for (i = 0; i < vcpu->cpuid_nent; ++i) {
-		e = &vcpu->cpuid_entries[i];
-		if (e->function == 0x80000001) {
-			entry = e;
-			break;
-		}
-	}
-	if (entry && (entry->edx & (1 << 20)) && !(efer & EFER_NX)) {
-		entry->edx &= ~(1 << 20);
-		printk(KERN_INFO "kvm: guest NX capability removed\n");
-	}
-}
-
-static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
-				    struct kvm_cpuid *cpuid,
-				    struct kvm_cpuid_entry __user *entries)
-{
-	int r;
-
-	r = -E2BIG;
-	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
-		goto out;
-	r = -EFAULT;
-	if (copy_from_user(&vcpu->cpuid_entries, entries,
-			   cpuid->nent * sizeof(struct kvm_cpuid_entry)))
-		goto out;
-	vcpu->cpuid_nent = cpuid->nent;
-	cpuid_fix_nx_cap(vcpu);
-	return 0;
-
-out:
-	return r;
-}
-
 static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
 {
 	if (sigset) {
@@ -2875,33 +2751,12 @@ static int kvm_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 	return 0;
 }
 
-static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
-				    struct kvm_lapic_state *s)
-{
-	vcpu_load(vcpu);
-	memcpy(s->regs, vcpu->apic->regs, sizeof *s);
-	vcpu_put(vcpu);
-
-	return 0;
-}
-
-static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
-				    struct kvm_lapic_state *s)
-{
-	vcpu_load(vcpu);
-	memcpy(vcpu->apic->regs, s->regs, sizeof *s);
-	kvm_apic_post_state_restore(vcpu);
-	vcpu_put(vcpu);
-
-	return 0;
-}
-
 static long kvm_vcpu_ioctl(struct file *filp,
 			   unsigned int ioctl, unsigned long arg)
 {
 	struct kvm_vcpu *vcpu = filp->private_data;
 	void __user *argp = (void __user *)arg;
-	int r = -EINVAL;
+	int r;
 
 	switch (ioctl) {
 	case KVM_RUN:
@@ -2999,24 +2854,6 @@ static long kvm_vcpu_ioctl(struct file *filp,
 		r = 0;
 		break;
 	}
-	case KVM_GET_MSRS:
-		r = msr_io(vcpu, argp, kvm_get_msr, 1);
-		break;
-	case KVM_SET_MSRS:
-		r = msr_io(vcpu, argp, do_set_msr, 0);
-		break;
-	case KVM_SET_CPUID: {
-		struct kvm_cpuid __user *cpuid_arg = argp;
-		struct kvm_cpuid cpuid;
-
-		r = -EFAULT;
-		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
-			goto out;
-		r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
-		if (r)
-			goto out;
-		break;
-	}
 	case KVM_SET_SIGNAL_MASK: {
 		struct kvm_signal_mask __user *sigmask_arg = argp;
 		struct kvm_signal_mask kvm_sigmask;
@@ -3065,33 +2902,8 @@ static long kvm_vcpu_ioctl(struct file *filp,
 		r = 0;
 		break;
 	}
-	case KVM_GET_LAPIC: {
-		struct kvm_lapic_state lapic;
-
-		memset(&lapic, 0, sizeof lapic);
-		r = kvm_vcpu_ioctl_get_lapic(vcpu, &lapic);
-		if (r)
-			goto out;
-		r = -EFAULT;
-		if (copy_to_user(argp, &lapic, sizeof lapic))
-			goto out;
-		r = 0;
-		break;
-	}
-	case KVM_SET_LAPIC: {
-		struct kvm_lapic_state lapic;
-
-		r = -EFAULT;
-		if (copy_from_user(&lapic, argp, sizeof lapic))
-			goto out;
-		r = kvm_vcpu_ioctl_set_lapic(vcpu, &lapic);;
-		if (r)
-			goto out;
-		r = 0;
-		break;
-	}
 	default:
-		;
+		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
 	}
 out:
 	return r;
diff --git a/drivers/kvm/x86.c b/drivers/kvm/x86.c
index 437902cf178d..1fe209dd4caf 100644
--- a/drivers/kvm/x86.c
+++ b/drivers/kvm/x86.c
@@ -14,10 +14,18 @@
  *
  */
 
+#include "kvm.h"
 #include "x86.h"
+#include "irq.h"
+
+#include <linux/kvm.h>
+#include <linux/fs.h>
+#include <linux/vmalloc.h>
 
 #include <asm/uaccess.h>
 
+#define MAX_IO_MSRS 256
+
 /*
  * List of msr numbers which we expose to userspace through KVM_GET_MSRS
  * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
@@ -40,6 +48,86 @@ static u32 emulated_msrs[] = {
 	MSR_IA32_MISC_ENABLE,
 };
 
+/*
+ * Adapt set_msr() to msr_io()'s calling convention
+ */
+static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
+{
+	return kvm_set_msr(vcpu, index, *data);
+}
+
+/*
+ * Read or write a bunch of msrs. All parameters are kernel addresses.
+ *
+ * @return number of msrs set successfully.
+ */
+static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
+		    struct kvm_msr_entry *entries,
+		    int (*do_msr)(struct kvm_vcpu *vcpu,
+				  unsigned index, u64 *data))
+{
+	int i;
+
+	vcpu_load(vcpu);
+
+	for (i = 0; i < msrs->nmsrs; ++i)
+		if (do_msr(vcpu, entries[i].index, &entries[i].data))
+			break;
+
+	vcpu_put(vcpu);
+
+	return i;
+}
+
+/*
+ * Read or write a bunch of msrs. Parameters are user addresses.
+ *
+ * @return number of msrs set successfully.
+ */
+static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
+		  int (*do_msr)(struct kvm_vcpu *vcpu,
+				unsigned index, u64 *data),
+		  int writeback)
+{
+	struct kvm_msrs msrs;
+	struct kvm_msr_entry *entries;
+	int r, n;
+	unsigned size;
+
+	r = -EFAULT;
+	if (copy_from_user(&msrs, user_msrs, sizeof msrs))
+		goto out;
+
+	r = -E2BIG;
+	if (msrs.nmsrs >= MAX_IO_MSRS)
+		goto out;
+
+	r = -ENOMEM;
+	size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
+	entries = vmalloc(size);
+	if (!entries)
+		goto out;
+
+	r = -EFAULT;
+	if (copy_from_user(entries, user_msrs->entries, size))
+		goto out_free;
+
+	r = n = __msr_io(vcpu, &msrs, entries, do_msr);
+	if (r < 0)
+		goto out_free;
+
+	r = -EFAULT;
+	if (writeback && copy_to_user(user_msrs->entries, entries, size))
+		goto out_free;
+
+	r = n;
+
+out_free:
+	vfree(entries);
+out:
+	return r;
+}
+
 long kvm_arch_dev_ioctl(struct file *filp,
 			unsigned int ioctl, unsigned long arg)
 {
@@ -81,6 +169,137 @@ out:
 	return r;
 }
 
+void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+	kvm_x86_ops->vcpu_load(vcpu, cpu);
+}
+
+void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
+{
+	kvm_x86_ops->vcpu_put(vcpu);
+}
+
+static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
+{
+	u64 efer;
+	int i;
+	struct kvm_cpuid_entry *e, *entry;
+
+	rdmsrl(MSR_EFER, efer);
+	entry = NULL;
+	for (i = 0; i < vcpu->cpuid_nent; ++i) {
+		e = &vcpu->cpuid_entries[i];
+		if (e->function == 0x80000001) {
+			entry = e;
+			break;
+		}
+	}
+	if (entry && (entry->edx & (1 << 20)) && !(efer & EFER_NX)) {
+		entry->edx &= ~(1 << 20);
+		printk(KERN_INFO "kvm: guest NX capability removed\n");
+	}
+}
+
+static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
+				    struct kvm_cpuid *cpuid,
+				    struct kvm_cpuid_entry __user *entries)
+{
+	int r;
+
+	r = -E2BIG;
+	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
+		goto out;
+	r = -EFAULT;
+	if (copy_from_user(&vcpu->cpuid_entries, entries,
+			   cpuid->nent * sizeof(struct kvm_cpuid_entry)))
+		goto out;
+	vcpu->cpuid_nent = cpuid->nent;
+	cpuid_fix_nx_cap(vcpu);
+	return 0;
+
+out:
+	return r;
+}
+
+static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
+				    struct kvm_lapic_state *s)
+{
+	vcpu_load(vcpu);
+	memcpy(s->regs, vcpu->apic->regs, sizeof *s);
+	vcpu_put(vcpu);
+
+	return 0;
+}
+
+static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
+				    struct kvm_lapic_state *s)
+{
+	vcpu_load(vcpu);
+	memcpy(vcpu->apic->regs, s->regs, sizeof *s);
+	kvm_apic_post_state_restore(vcpu);
+	vcpu_put(vcpu);
+
+	return 0;
+}
+
+long kvm_arch_vcpu_ioctl(struct file *filp,
+			 unsigned int ioctl, unsigned long arg)
+{
+	struct kvm_vcpu *vcpu = filp->private_data;
+	void __user *argp = (void __user *)arg;
+	int r;
+
+	switch (ioctl) {
+	case KVM_GET_LAPIC: {
+		struct kvm_lapic_state lapic;
+
+		memset(&lapic, 0, sizeof lapic);
+		r = kvm_vcpu_ioctl_get_lapic(vcpu, &lapic);
+		if (r)
+			goto out;
+		r = -EFAULT;
+		if (copy_to_user(argp, &lapic, sizeof lapic))
+			goto out;
+		r = 0;
+		break;
+	}
+	case KVM_SET_LAPIC: {
+		struct kvm_lapic_state lapic;
+
+		r = -EFAULT;
+		if (copy_from_user(&lapic, argp, sizeof lapic))
+			goto out;
+		r = kvm_vcpu_ioctl_set_lapic(vcpu, &lapic);;
+		if (r)
+			goto out;
+		r = 0;
+		break;
+	}
+	case KVM_SET_CPUID: {
+		struct kvm_cpuid __user *cpuid_arg = argp;
+		struct kvm_cpuid cpuid;
+
+		r = -EFAULT;
+		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
+			goto out;
+		r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
+		if (r)
+			goto out;
+		break;
+	}
+	case KVM_GET_MSRS:
+		r = msr_io(vcpu, argp, kvm_get_msr, 1);
+		break;
+	case KVM_SET_MSRS:
+		r = msr_io(vcpu, argp, do_set_msr, 0);
+		break;
+	default:
+		r = -EINVAL;
+	}
+out:
+	return r;
+}
+
 static __init void kvm_init_msr_list(void)
 {
 	u32 dummy[2];