author		Rusty Russell <rusty@rustcorp.com.au>	2007-07-27 03:16:56 -0400
committer	Avi Kivity <avi@qumranet.com>	2007-10-13 04:18:20 -0400
commit		fb3f0f51d92d1496f9628ca6f0fb06a48dc9ed2a (patch)
tree		38da1073dae5f30fd8f162669bb5a86959f8ace5 /drivers/kvm/kvm_main.c
parent		a2fa3e9f52d875f7d4ca98434603b8756be71ba8 (diff)
KVM: Dynamically allocate vcpus
This patch converts the vcpus array in "struct kvm" to a pointer
array, and changes the "vcpu_create" and "vcpu_setup" hooks into one
"vcpu_create" call which does the allocation and initialization of
the vcpu (calling back into the kvm_vcpu_init core helper).

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Avi Kivity <avi@qumranet.com>
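For context, the contract this patch establishes can be sketched as follows. This is an illustrative example only, not code from the patch: the container struct vcpu_demo and the helpers demo_setup()/demo_teardown() are invented names standing in for an arch backend (the real svm/vmx implementations live in other files). What it shows is the call order: the backend allocates its own vcpu container, calls back into kvm_vcpu_init(), and unwinds in reverse order on failure.

struct vcpu_demo {
	struct kvm_vcpu vcpu;	/* embeds the generic vcpu */
	/* arch-private state would follow */
};

/* Hypothetical arch hooks; stubs so the sketch is complete. */
static int demo_setup(struct vcpu_demo *d) { return 0; }
static void demo_teardown(struct vcpu_demo *d) { }

static struct kvm_vcpu *demo_vcpu_create(struct kvm *kvm, unsigned id)
{
	struct vcpu_demo *d = kzalloc(sizeof(*d), GFP_KERNEL);
	int r;

	if (!d)
		return ERR_PTR(-ENOMEM);

	r = kvm_vcpu_init(&d->vcpu, kvm, id);	/* core init: run page, pio page, mmu */
	if (r < 0)
		goto free;

	r = demo_setup(d);			/* arch-specific setup */
	if (r < 0)
		goto uninit;

	return &d->vcpu;

uninit:
	kvm_vcpu_uninit(&d->vcpu);
free:
	kfree(d);
	return ERR_PTR(r);
}

static void demo_vcpu_free(struct kvm_vcpu *vcpu)
{
	struct vcpu_demo *d = container_of(vcpu, struct vcpu_demo, vcpu);

	demo_teardown(d);	/* arch-specific teardown */
	kvm_vcpu_uninit(vcpu);	/* frees pio/run pages, destroys the mmu */
	kfree(d);
}

With creation in the backend's hands, kvm_vm_ioctl_create_vcpu() (see the last large hunk below) only has to publish the returned pointer into kvm->vcpus[n] under kvm->lock.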
Diffstat (limited to 'drivers/kvm/kvm_main.c')
-rw-r--r--	drivers/kvm/kvm_main.c	198
1 file changed, 103 insertions(+), 95 deletions(-)
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index bf8b8f030192..69d9ab4e7cb4 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -266,8 +266,10 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
 	atomic_set(&completed, 0);
 	cpus_clear(cpus);
 	needed = 0;
-	for (i = 0; i < kvm->nvcpus; ++i) {
-		vcpu = &kvm->vcpus[i];
+	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+		vcpu = kvm->vcpus[i];
+		if (!vcpu)
+			continue;
 		if (test_and_set_bit(KVM_TLB_FLUSH, &vcpu->requests))
 			continue;
 		cpu = vcpu->cpu;
@@ -291,10 +293,61 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
 	}
 }
 
+int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
+{
+	struct page *page;
+	int r;
+
+	mutex_init(&vcpu->mutex);
+	vcpu->cpu = -1;
+	vcpu->mmu.root_hpa = INVALID_PAGE;
+	vcpu->kvm = kvm;
+	vcpu->vcpu_id = id;
+
+	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+	if (!page) {
+		r = -ENOMEM;
+		goto fail;
+	}
+	vcpu->run = page_address(page);
+
+	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+	if (!page) {
+		r = -ENOMEM;
+		goto fail_free_run;
+	}
+	vcpu->pio_data = page_address(page);
+
+	vcpu->host_fx_image = (char*)ALIGN((hva_t)vcpu->fx_buf,
+					   FX_IMAGE_ALIGN);
+	vcpu->guest_fx_image = vcpu->host_fx_image + FX_IMAGE_SIZE;
+
+	r = kvm_mmu_create(vcpu);
+	if (r < 0)
+		goto fail_free_pio_data;
+
+	return 0;
+
+fail_free_pio_data:
+	free_page((unsigned long)vcpu->pio_data);
+fail_free_run:
+	free_page((unsigned long)vcpu->run);
+fail:
+	return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_init);
+
+void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
+{
+	kvm_mmu_destroy(vcpu);
+	free_page((unsigned long)vcpu->pio_data);
+	free_page((unsigned long)vcpu->run);
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);
+
 static struct kvm *kvm_create_vm(void)
 {
 	struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
-	int i;
 
 	if (!kvm)
 		return ERR_PTR(-ENOMEM);
@@ -303,14 +356,6 @@ static struct kvm *kvm_create_vm(void)
 	spin_lock_init(&kvm->lock);
 	INIT_LIST_HEAD(&kvm->active_mmu_pages);
 	kvm_io_bus_init(&kvm->mmio_bus);
-	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
-		struct kvm_vcpu *vcpu = &kvm->vcpus[i];
-
-		mutex_init(&vcpu->mutex);
-		vcpu->cpu = -1;
-		vcpu->kvm = kvm;
-		vcpu->mmu.root_hpa = INVALID_PAGE;
-	}
 	spin_lock(&kvm_lock);
 	list_add(&kvm->vm_list, &vm_list);
 	spin_unlock(&kvm_lock);
@@ -367,30 +412,11 @@ static void free_pio_guest_pages(struct kvm_vcpu *vcpu)
 
 static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
 {
-	if (!vcpu->valid)
-		return;
-
 	vcpu_load(vcpu);
 	kvm_mmu_unload(vcpu);
 	vcpu_put(vcpu);
 }
 
-static void kvm_free_vcpu(struct kvm_vcpu *vcpu)
-{
-	if (!vcpu->valid)
-		return;
-
-	vcpu_load(vcpu);
-	kvm_mmu_destroy(vcpu);
-	vcpu_put(vcpu);
-	kvm_arch_ops->vcpu_free(vcpu);
-	free_page((unsigned long)vcpu->run);
-	vcpu->run = NULL;
-	free_page((unsigned long)vcpu->pio_data);
-	vcpu->pio_data = NULL;
-	free_pio_guest_pages(vcpu);
-}
-
 static void kvm_free_vcpus(struct kvm *kvm)
 {
 	unsigned int i;
@@ -399,9 +425,15 @@ static void kvm_free_vcpus(struct kvm *kvm)
 	 * Unpin any mmu pages first.
 	 */
 	for (i = 0; i < KVM_MAX_VCPUS; ++i)
-		kvm_unload_vcpu_mmu(&kvm->vcpus[i]);
-	for (i = 0; i < KVM_MAX_VCPUS; ++i)
-		kvm_free_vcpu(&kvm->vcpus[i]);
+		if (kvm->vcpus[i])
+			kvm_unload_vcpu_mmu(kvm->vcpus[i]);
+	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+		if (kvm->vcpus[i]) {
+			kvm_arch_ops->vcpu_free(kvm->vcpus[i]);
+			kvm->vcpus[i] = NULL;
+		}
+	}
+
 }
 
 static int kvm_dev_release(struct inode *inode, struct file *filp)
@@ -2372,77 +2404,47 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
 {
 	int r;
 	struct kvm_vcpu *vcpu;
-	struct page *page;
 
-	r = -EINVAL;
 	if (!valid_vcpu(n))
-		goto out;
-
-	vcpu = &kvm->vcpus[n];
-	vcpu->vcpu_id = n;
-
-	mutex_lock(&vcpu->mutex);
-
-	if (vcpu->valid) {
-		mutex_unlock(&vcpu->mutex);
-		return -EEXIST;
-	}
-
-	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
-	r = -ENOMEM;
-	if (!page)
-		goto out_unlock;
-	vcpu->run = page_address(page);
-
-	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
-	r = -ENOMEM;
-	if (!page)
-		goto out_free_run;
-	vcpu->pio_data = page_address(page);
-
-	vcpu->host_fx_image = (char*)ALIGN((hva_t)vcpu->fx_buf,
-					   FX_IMAGE_ALIGN);
-	vcpu->guest_fx_image = vcpu->host_fx_image + FX_IMAGE_SIZE;
-	vcpu->cr0 = 0x10;
-
-	r = kvm_arch_ops->vcpu_create(vcpu);
-	if (r < 0)
-		goto out_free_vcpus;
+		return -EINVAL;
 
-	r = kvm_mmu_create(vcpu);
-	if (r < 0)
-		goto out_free_vcpus;
+	vcpu = kvm_arch_ops->vcpu_create(kvm, n);
+	if (IS_ERR(vcpu))
+		return PTR_ERR(vcpu);
 
-	kvm_arch_ops->vcpu_load(vcpu);
+	vcpu_load(vcpu);
 	r = kvm_mmu_setup(vcpu);
-	if (r >= 0)
-		r = kvm_arch_ops->vcpu_setup(vcpu);
 	vcpu_put(vcpu);
-
 	if (r < 0)
-		goto out_free_vcpus;
+		goto free_vcpu;
 
+	spin_lock(&kvm->lock);
+	if (kvm->vcpus[n]) {
+		r = -EEXIST;
+		spin_unlock(&kvm->lock);
+		goto mmu_unload;
+	}
+	kvm->vcpus[n] = vcpu;
+	spin_unlock(&kvm->lock);
+
+	/* Now it's all set up, let userspace reach it */
 	r = create_vcpu_fd(vcpu);
 	if (r < 0)
-		goto out_free_vcpus;
+		goto unlink;
+	return r;
 
-	spin_lock(&kvm_lock);
-	if (n >= kvm->nvcpus)
-		kvm->nvcpus = n + 1;
-	spin_unlock(&kvm_lock);
+unlink:
+	spin_lock(&kvm->lock);
+	kvm->vcpus[n] = NULL;
+	spin_unlock(&kvm->lock);
 
-	vcpu->valid = 1;
+mmu_unload:
+	vcpu_load(vcpu);
+	kvm_mmu_unload(vcpu);
+	vcpu_put(vcpu);
 
-	return r;
-
-out_free_vcpus:
-	kvm_free_vcpu(vcpu);
-out_free_run:
-	free_page((unsigned long)vcpu->run);
-	vcpu->run = NULL;
-out_unlock:
-	mutex_unlock(&vcpu->mutex);
-out:
+free_vcpu:
+	kvm_arch_ops->vcpu_free(vcpu);
 	return r;
 }
 
@@ -2935,9 +2937,12 @@ static void decache_vcpus_on_cpu(int cpu)
 	int i;
 
 	spin_lock(&kvm_lock);
-	list_for_each_entry(vm, &vm_list, vm_list)
+	list_for_each_entry(vm, &vm_list, vm_list) {
+		spin_lock(&vm->lock);
 		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
-			vcpu = &vm->vcpus[i];
+			vcpu = vm->vcpus[i];
+			if (!vcpu)
+				continue;
 			/*
 			 * If the vcpu is locked, then it is running on some
 			 * other cpu and therefore it is not cached on the
@@ -2954,6 +2959,8 @@ static void decache_vcpus_on_cpu(int cpu)
 				mutex_unlock(&vcpu->mutex);
 			}
 		}
+		spin_unlock(&vm->lock);
+	}
 	spin_unlock(&kvm_lock);
 }
 
@@ -3078,8 +3085,9 @@ static u64 stat_get(void *_offset)
 	spin_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list)
 		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
-			vcpu = &kvm->vcpus[i];
-			total += *(u32 *)((void *)vcpu + offset);
+			vcpu = kvm->vcpus[i];
+			if (vcpu)
+				total += *(u32 *)((void *)vcpu + offset);
 		}
 	spin_unlock(&kvm_lock);
 	return total;
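The common thread in the smaller hunks above: kvm->vcpus is now an array of pointers with possibly empty slots, so every walker iterates the full KVM_MAX_VCPUS range and skips NULL entries rather than relying on the kvm->nvcpus count, which this file no longer uses. A minimal sketch of that pattern, with the per-vcpu work left as a comment:

	struct kvm_vcpu *vcpu;
	int i;

	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		vcpu = kvm->vcpus[i];
		if (!vcpu)
			continue;	/* slot never populated, or already freed */
		/* ... operate on vcpu ... */
	}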