Diffstat (limited to 'drivers/kvm/kvm_main.c')
-rw-r--r--  drivers/kvm/kvm_main.c | 113
1 file changed, 18 insertions, 95 deletions
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 47a76c3a4c81..7fdfed52dbe7 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -50,8 +50,8 @@
 MODULE_AUTHOR("Qumranet");
 MODULE_LICENSE("GPL");
 
-static DEFINE_SPINLOCK(kvm_lock);
-static LIST_HEAD(vm_list);
+DEFINE_SPINLOCK(kvm_lock);
+LIST_HEAD(vm_list);
 
 static cpumask_t cpus_hardware_enabled;
 
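Note that kvm_lock and vm_list lose their static qualifier here. A hedged reading: they are presumably given extern declarations in a header shared with the arch code (which header is an assumption, it is not part of this diff) so that arch-specific code can walk the VM list under the lock:

/* Presumed declarations in a shared header; a sketch only, not shown in this diff. */
extern spinlock_t kvm_lock;
extern struct list_head vm_list;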
@@ -124,13 +124,8 @@ int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
 
        mutex_init(&vcpu->mutex);
        vcpu->cpu = -1;
-       vcpu->mmu.root_hpa = INVALID_PAGE;
        vcpu->kvm = kvm;
        vcpu->vcpu_id = id;
-       if (!irqchip_in_kernel(kvm) || id == 0)
-               vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
-       else
-               vcpu->mp_state = VCPU_MP_STATE_UNINITIALIZED;
        init_waitqueue_head(&vcpu->wq);
 
        page = alloc_page(GFP_KERNEL | __GFP_ZERO);
@@ -140,29 +135,11 @@ int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
        }
        vcpu->run = page_address(page);
 
-       page = alloc_page(GFP_KERNEL | __GFP_ZERO);
-       if (!page) {
-               r = -ENOMEM;
-               goto fail_free_run;
-       }
-       vcpu->pio_data = page_address(page);
-
-       r = kvm_mmu_create(vcpu);
+       r = kvm_arch_vcpu_init(vcpu);
        if (r < 0)
-               goto fail_free_pio_data;
-
-       if (irqchip_in_kernel(kvm)) {
-               r = kvm_create_lapic(vcpu);
-               if (r < 0)
-                       goto fail_mmu_destroy;
-       }
-
+               goto fail_free_run;
        return 0;
 
-fail_mmu_destroy:
-       kvm_mmu_destroy(vcpu);
-fail_free_pio_data:
-       free_page((unsigned long)vcpu->pio_data);
 fail_free_run:
        free_page((unsigned long)vcpu->run);
 fail:
@@ -172,9 +149,7 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_init);
 
 void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
 {
-       kvm_free_lapic(vcpu);
-       kvm_mmu_destroy(vcpu);
-       free_page((unsigned long)vcpu->pio_data);
+       kvm_arch_vcpu_uninit(vcpu);
        free_page((unsigned long)vcpu->run);
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);
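The x86-specific setup removed from kvm_vcpu_init() and kvm_vcpu_uninit() above (the pio_data page, the MMU context, the in-kernel local APIC, and the mp_state initialisation) presumably reappears behind the new kvm_arch_vcpu_init()/kvm_arch_vcpu_uninit() hooks. A hedged sketch of what those hooks might look like, reconstructed only from the lines deleted here; the arch-side file is not part of this diff, and kvm/id are taken from the vcpu fields that kvm_vcpu_init() still fills in before calling the hook:

/* Sketch only: reconstructed from the code removed above, not from the arch file itself. */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        struct page *page;
        int r;

        vcpu->mmu.root_hpa = INVALID_PAGE;
        if (!irqchip_in_kernel(vcpu->kvm) || vcpu->vcpu_id == 0)
                vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
        else
                vcpu->mp_state = VCPU_MP_STATE_UNINITIALIZED;

        page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!page)
                return -ENOMEM;
        vcpu->pio_data = page_address(page);

        r = kvm_mmu_create(vcpu);
        if (r < 0)
                goto fail_free_pio_data;

        if (irqchip_in_kernel(vcpu->kvm)) {
                r = kvm_create_lapic(vcpu);
                if (r < 0)
                        goto fail_mmu_destroy;
        }

        return 0;

fail_mmu_destroy:
        kvm_mmu_destroy(vcpu);
fail_free_pio_data:
        free_page((unsigned long)vcpu->pio_data);
        return r;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        kvm_free_lapic(vcpu);
        kvm_mmu_destroy(vcpu);
        free_page((unsigned long)vcpu->pio_data);
}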
@@ -240,7 +215,7 @@ static void kvm_free_vcpus(struct kvm *kvm)
                        kvm_unload_vcpu_mmu(kvm->vcpus[i]);
        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                if (kvm->vcpus[i]) {
-                       kvm_x86_ops->vcpu_free(kvm->vcpus[i]);
+                       kvm_arch_vcpu_free(kvm->vcpus[i]);
                        kvm->vcpus[i] = NULL;
                }
        }
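kvm_free_vcpus() now calls kvm_arch_vcpu_free() instead of going through the ops table directly. A minimal sketch, assuming the arch hook is a thin forwarding wrapper (a guess; the wrapper itself is not in this diff):

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
        kvm_x86_ops->vcpu_free(vcpu);   /* presumed forwarding to the existing ops table */
}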
@@ -900,28 +875,17 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
        if (!valid_vcpu(n))
                return -EINVAL;
 
-       vcpu = kvm_x86_ops->vcpu_create(kvm, n);
+       vcpu = kvm_arch_vcpu_create(kvm, n);
        if (IS_ERR(vcpu))
                return PTR_ERR(vcpu);
 
        preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
 
-       /* We do fxsave: this must be aligned. */
-       BUG_ON((unsigned long)&vcpu->host_fx_image & 0xF);
-
-       vcpu_load(vcpu);
-       r = kvm_x86_ops->vcpu_reset(vcpu);
-       if (r == 0)
-               r = kvm_mmu_setup(vcpu);
-       vcpu_put(vcpu);
-       if (r < 0)
-               goto free_vcpu;
-
        mutex_lock(&kvm->lock);
        if (kvm->vcpus[n]) {
                r = -EEXIST;
                mutex_unlock(&kvm->lock);
-               goto mmu_unload;
+               goto vcpu_destroy;
        }
        kvm->vcpus[n] = vcpu;
        mutex_unlock(&kvm->lock);
@@ -936,14 +900,8 @@ unlink:
        mutex_lock(&kvm->lock);
        kvm->vcpus[n] = NULL;
        mutex_unlock(&kvm->lock);
-
-mmu_unload:
-       vcpu_load(vcpu);
-       kvm_mmu_unload(vcpu);
-       vcpu_put(vcpu);
-
-free_vcpu:
-       kvm_x86_ops->vcpu_free(vcpu);
+vcpu_destroy:
+       kvm_arch_vcpu_destory(vcpu);
        return r;
 }
 
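The reset/MMU-setup sequence and the two error labels removed from kvm_vm_ioctl_create_vcpu() above presumably move behind kvm_arch_vcpu_create() and the hook behind the new vcpu_destroy target. A hedged sketch reconstructed from the deleted logic; returning errors via ERR_PTR() is an assumption (the caller only checks IS_ERR()), the parameter names are guesses, and the "destory" spelling is kept exactly as it appears in the diff:

/* Sketch only: reconstructed from the logic removed above, not from the arch file itself. */
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, int n)
{
        int r;
        struct kvm_vcpu *vcpu = kvm_x86_ops->vcpu_create(kvm, n);

        if (IS_ERR(vcpu))
                return vcpu;

        /* We do fxsave: this must be aligned. */
        BUG_ON((unsigned long)&vcpu->host_fx_image & 0xF);

        vcpu_load(vcpu);
        r = kvm_x86_ops->vcpu_reset(vcpu);
        if (r == 0)
                r = kvm_mmu_setup(vcpu);
        vcpu_put(vcpu);
        if (r < 0)
                goto free_vcpu;

        return vcpu;

free_vcpu:
        kvm_x86_ops->vcpu_free(vcpu);
        return ERR_PTR(r);      /* assumption: error returned as ERR_PTR so IS_ERR() works */
}

void kvm_arch_vcpu_destory(struct kvm_vcpu *vcpu)       /* sic: spelling as in the diff */
{
        vcpu_load(vcpu);
        kvm_mmu_unload(vcpu);
        vcpu_put(vcpu);
        kvm_x86_ops->vcpu_free(vcpu);
}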
@@ -1281,41 +1239,6 @@ static struct miscdevice kvm_dev = {
        &kvm_chardev_ops,
 };
 
-/*
- * Make sure that a cpu that is being hot-unplugged does not have any vcpus
- * cached on it.
- */
-static void decache_vcpus_on_cpu(int cpu)
-{
-       struct kvm *vm;
-       struct kvm_vcpu *vcpu;
-       int i;
-
-       spin_lock(&kvm_lock);
-       list_for_each_entry(vm, &vm_list, vm_list)
-               for (i = 0; i < KVM_MAX_VCPUS; ++i) {
-                       vcpu = vm->vcpus[i];
-                       if (!vcpu)
-                               continue;
-                       /*
-                        * If the vcpu is locked, then it is running on some
-                        * other cpu and therefore it is not cached on the
-                        * cpu in question.
-                        *
-                        * If it's not locked, check the last cpu it executed
-                        * on.
-                        */
-                       if (mutex_trylock(&vcpu->mutex)) {
-                               if (vcpu->cpu == cpu) {
-                                       kvm_x86_ops->vcpu_decache(vcpu);
-                                       vcpu->cpu = -1;
-                               }
-                               mutex_unlock(&vcpu->mutex);
-                       }
-               }
-       spin_unlock(&kvm_lock);
-}
-
 static void hardware_enable(void *junk)
 {
        int cpu = raw_smp_processor_id();
@@ -1323,7 +1246,7 @@ static void hardware_enable(void *junk)
        if (cpu_isset(cpu, cpus_hardware_enabled))
                return;
        cpu_set(cpu, cpus_hardware_enabled);
-       kvm_x86_ops->hardware_enable(NULL);
+       kvm_arch_hardware_enable(NULL);
 }
 
 static void hardware_disable(void *junk)
@@ -1334,7 +1257,7 @@ static void hardware_disable(void *junk)
                return;
        cpu_clear(cpu, cpus_hardware_enabled);
        decache_vcpus_on_cpu(cpu);
-       kvm_x86_ops->hardware_disable(NULL);
+       kvm_arch_hardware_disable(NULL);
 }
 
 static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
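decache_vcpus_on_cpu() is deleted from this file in the hunk above, yet hardware_disable() still calls it here, so the helper has presumably moved into the arch-specific code rather than being dropped. For kvm_main.c to keep compiling, a declaration along these lines would have to be visible to it (which header it lands in is an assumption):

void decache_vcpus_on_cpu(int cpu);     /* presumed prototype; definition now lives in arch code */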
@@ -1500,7 +1423,7 @@ static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
 {
        struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
 
-       kvm_x86_ops->vcpu_load(vcpu, cpu);
+       kvm_arch_vcpu_load(vcpu, cpu);
 }
 
 static void kvm_sched_out(struct preempt_notifier *pn,
@@ -1508,7 +1431,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
 {
        struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
 
-       kvm_x86_ops->vcpu_put(vcpu);
+       kvm_arch_vcpu_put(vcpu);
 }
 
 int kvm_init_x86(struct kvm_x86_ops *ops, unsigned int vcpu_size,
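The scheduling and hardware-control hunks in this file replace direct kvm_x86_ops->X(...) calls with kvm_arch_X(...) one for one, which suggests a family of thin forwarding wrappers on the arch side. A hedged sketch, assuming they simply delegate to the existing ops table; signatures are inferred from the call sites in this file, and the wrappers themselves are not part of this diff:

/* Hedged sketches only: presumed arch-side forwarding wrappers. */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        kvm_x86_ops->vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        kvm_x86_ops->vcpu_put(vcpu);
}

void kvm_arch_hardware_enable(void *garbage)
{
        kvm_x86_ops->hardware_enable(garbage);
}

void kvm_arch_hardware_disable(void *garbage)
{
        kvm_x86_ops->hardware_disable(garbage);
}

int kvm_arch_hardware_setup(void)
{
        return kvm_x86_ops->hardware_setup();
}

void kvm_arch_hardware_unsetup(void)
{
        kvm_x86_ops->hardware_unsetup();
}

void kvm_arch_check_processor_compat(void *rtn)
{
        kvm_x86_ops->check_processor_compatibility(rtn);
}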
@@ -1533,13 +1456,13 @@ int kvm_init_x86(struct kvm_x86_ops *ops, unsigned int vcpu_size,
 
        kvm_x86_ops = ops;
 
-       r = kvm_x86_ops->hardware_setup();
+       r = kvm_arch_hardware_setup();
        if (r < 0)
                goto out;
 
        for_each_online_cpu(cpu) {
                smp_call_function_single(cpu,
-                               kvm_x86_ops->check_processor_compatibility,
+                               kvm_arch_check_processor_compat,
                                &r, 0, 1);
                if (r < 0)
                        goto out_free_0;
@@ -1594,7 +1517,7 @@ out_free_2:
 out_free_1:
        on_each_cpu(hardware_disable, NULL, 0, 1);
 out_free_0:
-       kvm_x86_ops->hardware_unsetup();
+       kvm_arch_hardware_unsetup();
 out:
        kvm_x86_ops = NULL;
        return r;
@@ -1610,7 +1533,7 @@ void kvm_exit_x86(void)
        unregister_reboot_notifier(&kvm_reboot_notifier);
        unregister_cpu_notifier(&kvm_cpu_notifier);
        on_each_cpu(hardware_disable, NULL, 0, 1);
-       kvm_x86_ops->hardware_unsetup();
+       kvm_arch_hardware_unsetup();
        kvm_x86_ops = NULL;
 }
 EXPORT_SYMBOL_GPL(kvm_exit_x86);
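Taken together, the calls introduced in this diff imply roughly the following set of arch-hook prototypes, declared in some header shared between kvm_main.c and the arch code. Which header, and the exact parameter names, are assumptions; return types and argument lists are inferred from the call sites above:

/* Presumed prototypes, inferred from the kvm_arch_*() call sites in this diff. */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, int n);
void kvm_arch_vcpu_destory(struct kvm_vcpu *vcpu);      /* sic: spelling as in the diff */
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
void kvm_arch_hardware_enable(void *garbage);
void kvm_arch_hardware_disable(void *garbage);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);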