-rw-r--r--  drivers/kvm/kvm.h        7
-rw-r--r--  drivers/kvm/kvm_main.c   255
-rw-r--r--  drivers/kvm/x86.c        258
3 files changed, 271 insertions, 249 deletions
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 3d07d9b1b815..516f79ffd126 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -620,6 +620,13 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                          unsigned int ioctl, unsigned long arg);
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
+int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
+                                   struct
+                                   kvm_userspace_memory_region *mem,
+                                   int user_alloc);
+long kvm_arch_vm_ioctl(struct file *filp,
+                       unsigned int ioctl, unsigned long arg);
+void kvm_arch_destroy_vm(struct kvm *kvm);
 
 __init void kvm_arch_init(void);
 
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 50fd040b9ea9..c632e3a3b514 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -792,36 +792,16 @@ out:
 }
 EXPORT_SYMBOL_GPL(kvm_set_memory_region);
 
-static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
+int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
                                    struct
                                    kvm_userspace_memory_region *mem,
                                    int user_alloc)
 {
         if (mem->slot >= KVM_MEMORY_SLOTS)
                 return -EINVAL;
         return kvm_set_memory_region(kvm, mem, user_alloc);
 }
 
-static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
-                                         u32 kvm_nr_mmu_pages)
-{
-        if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
-                return -EINVAL;
-
-        mutex_lock(&kvm->lock);
-
-        kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
-        kvm->n_requested_mmu_pages = kvm_nr_mmu_pages;
-
-        mutex_unlock(&kvm->lock);
-        return 0;
-}
-
-static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
-{
-        return kvm->n_alloc_mmu_pages;
-}
-
 /*
  * Get (and clear) the dirty memory log for a memory slot.
  */
@@ -867,111 +847,6 @@ out:
         return r;
 }
 
-/*
- * Set a new alias region.  Aliases map a portion of physical memory into
- * another portion.  This is useful for memory windows, for example the PC
- * VGA region.
- */
-static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
-                                         struct kvm_memory_alias *alias)
-{
-        int r, n;
-        struct kvm_mem_alias *p;
-
-        r = -EINVAL;
-        /* General sanity checks */
-        if (alias->memory_size & (PAGE_SIZE - 1))
-                goto out;
-        if (alias->guest_phys_addr & (PAGE_SIZE - 1))
-                goto out;
-        if (alias->slot >= KVM_ALIAS_SLOTS)
-                goto out;
-        if (alias->guest_phys_addr + alias->memory_size
-            < alias->guest_phys_addr)
-                goto out;
-        if (alias->target_phys_addr + alias->memory_size
-            < alias->target_phys_addr)
-                goto out;
-
-        mutex_lock(&kvm->lock);
-
-        p = &kvm->aliases[alias->slot];
-        p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
-        p->npages = alias->memory_size >> PAGE_SHIFT;
-        p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;
-
-        for (n = KVM_ALIAS_SLOTS; n > 0; --n)
-                if (kvm->aliases[n - 1].npages)
-                        break;
-        kvm->naliases = n;
-
-        kvm_mmu_zap_all(kvm);
-
-        mutex_unlock(&kvm->lock);
-
-        return 0;
-
-out:
-        return r;
-}
-
-static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
-{
-        int r;
-
-        r = 0;
-        switch (chip->chip_id) {
-        case KVM_IRQCHIP_PIC_MASTER:
-                memcpy(&chip->chip.pic,
-                        &pic_irqchip(kvm)->pics[0],
-                        sizeof(struct kvm_pic_state));
-                break;
-        case KVM_IRQCHIP_PIC_SLAVE:
-                memcpy(&chip->chip.pic,
-                        &pic_irqchip(kvm)->pics[1],
-                        sizeof(struct kvm_pic_state));
-                break;
-        case KVM_IRQCHIP_IOAPIC:
-                memcpy(&chip->chip.ioapic,
-                        ioapic_irqchip(kvm),
-                        sizeof(struct kvm_ioapic_state));
-                break;
-        default:
-                r = -EINVAL;
-                break;
-        }
-        return r;
-}
-
-static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
-{
-        int r;
-
-        r = 0;
-        switch (chip->chip_id) {
-        case KVM_IRQCHIP_PIC_MASTER:
-                memcpy(&pic_irqchip(kvm)->pics[0],
-                        &chip->chip.pic,
-                        sizeof(struct kvm_pic_state));
-                break;
-        case KVM_IRQCHIP_PIC_SLAVE:
-                memcpy(&pic_irqchip(kvm)->pics[1],
-                        &chip->chip.pic,
-                        sizeof(struct kvm_pic_state));
-                break;
-        case KVM_IRQCHIP_IOAPIC:
-                memcpy(ioapic_irqchip(kvm),
-                        &chip->chip.ioapic,
-                        sizeof(struct kvm_ioapic_state));
-                break;
-        default:
-                r = -EINVAL;
-                break;
-        }
-        kvm_pic_update_irq(pic_irqchip(kvm));
-        return r;
-}
-
 int is_error_page(struct page *page)
 {
         return page == bad_page;
@@ -2661,16 +2536,6 @@ static int create_vcpu_fd(struct kvm_vcpu *vcpu)
         return fd;
 }
 
-static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
-{
-        int ret;
-
-        if (addr > (unsigned int)(-3 * PAGE_SIZE))
-                return -1;
-        ret = kvm_x86_ops->set_tss_addr(kvm, addr);
-        return ret;
-}
-
 /*
  * Creates some virtual cpus.  Good luck creating more than one.
  */
@@ -2964,35 +2829,14 @@ static long kvm_vm_ioctl(struct file *filp,
 {
         struct kvm *kvm = filp->private_data;
         void __user *argp = (void __user *)arg;
-        int r = -EINVAL;
+        int r;
 
         switch (ioctl) {
-        case KVM_SET_TSS_ADDR:
-                r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
-                if (r < 0)
-                        goto out;
-                break;
         case KVM_CREATE_VCPU:
                 r = kvm_vm_ioctl_create_vcpu(kvm, arg);
                 if (r < 0)
                         goto out;
                 break;
-        case KVM_SET_MEMORY_REGION: {
-                struct kvm_memory_region kvm_mem;
-                struct kvm_userspace_memory_region kvm_userspace_mem;
-
-                r = -EFAULT;
-                if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
-                        goto out;
-                kvm_userspace_mem.slot = kvm_mem.slot;
-                kvm_userspace_mem.flags = kvm_mem.flags;
-                kvm_userspace_mem.guest_phys_addr = kvm_mem.guest_phys_addr;
-                kvm_userspace_mem.memory_size = kvm_mem.memory_size;
-                r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 0);
-                if (r)
-                        goto out;
-                break;
-        }
         case KVM_SET_USER_MEMORY_REGION: {
                 struct kvm_userspace_memory_region kvm_userspace_mem;
 
@@ -3006,14 +2850,6 @@ static long kvm_vm_ioctl(struct file *filp,
                         goto out;
                 break;
         }
-        case KVM_SET_NR_MMU_PAGES:
-                r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
-                if (r)
-                        goto out;
-                break;
-        case KVM_GET_NR_MMU_PAGES:
-                r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
-                break;
         case KVM_GET_DIRTY_LOG: {
                 struct kvm_dirty_log log;
 
@@ -3025,87 +2861,8 @@ static long kvm_vm_ioctl(struct file *filp,
                         goto out;
                 break;
         }
-        case KVM_SET_MEMORY_ALIAS: {
-                struct kvm_memory_alias alias;
-
-                r = -EFAULT;
-                if (copy_from_user(&alias, argp, sizeof alias))
-                        goto out;
-                r = kvm_vm_ioctl_set_memory_alias(kvm, &alias);
-                if (r)
-                        goto out;
-                break;
-        }
-        case KVM_CREATE_IRQCHIP:
-                r = -ENOMEM;
-                kvm->vpic = kvm_create_pic(kvm);
-                if (kvm->vpic) {
-                        r = kvm_ioapic_init(kvm);
-                        if (r) {
-                                kfree(kvm->vpic);
-                                kvm->vpic = NULL;
-                                goto out;
-                        }
-                } else
-                        goto out;
-                break;
-        case KVM_IRQ_LINE: {
-                struct kvm_irq_level irq_event;
-
-                r = -EFAULT;
-                if (copy_from_user(&irq_event, argp, sizeof irq_event))
-                        goto out;
-                if (irqchip_in_kernel(kvm)) {
-                        mutex_lock(&kvm->lock);
-                        if (irq_event.irq < 16)
-                                kvm_pic_set_irq(pic_irqchip(kvm),
-                                        irq_event.irq,
-                                        irq_event.level);
-                        kvm_ioapic_set_irq(kvm->vioapic,
-                                        irq_event.irq,
-                                        irq_event.level);
-                        mutex_unlock(&kvm->lock);
-                        r = 0;
-                }
-                break;
-        }
-        case KVM_GET_IRQCHIP: {
-                /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
-                struct kvm_irqchip chip;
-
-                r = -EFAULT;
-                if (copy_from_user(&chip, argp, sizeof chip))
-                        goto out;
-                r = -ENXIO;
-                if (!irqchip_in_kernel(kvm))
-                        goto out;
-                r = kvm_vm_ioctl_get_irqchip(kvm, &chip);
-                if (r)
-                        goto out;
-                r = -EFAULT;
-                if (copy_to_user(argp, &chip, sizeof chip))
-                        goto out;
-                r = 0;
-                break;
-        }
-        case KVM_SET_IRQCHIP: {
-                /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
-                struct kvm_irqchip chip;
-
-                r = -EFAULT;
-                if (copy_from_user(&chip, argp, sizeof chip))
-                        goto out;
-                r = -ENXIO;
-                if (!irqchip_in_kernel(kvm))
-                        goto out;
-                r = kvm_vm_ioctl_set_irqchip(kvm, &chip);
-                if (r)
-                        goto out;
-                r = 0;
-                break;
-        }
         default:
-                ;
+                r = kvm_arch_vm_ioctl(filp, ioctl, arg);
         }
 out:
         return r;
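
With the cases above removed, kvm_vm_ioctl() in kvm_main.c keeps only the architecture-neutral ioctls and defers anything it does not recognize to kvm_arch_vm_ioctl() in its default branch. The standalone sketch below illustrates that generic-first, arch-fallback dispatch shape; the function names and command codes are hypothetical stand-ins, not KVM code.

#include <stdio.h>

/* Toy command codes standing in for real ioctl numbers (hypothetical). */
enum { CMD_CREATE_VCPU = 1, CMD_SET_TSS_ADDR = 2 };

/* Architecture-specific handler: knows only about its own commands. */
static long handle_arch(unsigned int cmd, unsigned long arg)
{
        switch (cmd) {
        case CMD_SET_TSS_ADDR:
                printf("arch: set TSS addr to %#lx\n", arg);
                return 0;
        default:
                return -1;      /* unknown even to the arch layer */
        }
}

/* Generic handler: handles what it knows, defers the rest. */
static long handle_generic(unsigned int cmd, unsigned long arg)
{
        switch (cmd) {
        case CMD_CREATE_VCPU:
                printf("generic: create vcpu %lu\n", arg);
                return 0;
        default:
                return handle_arch(cmd, arg);   /* arch fallback */
        }
}

int main(void)
{
        handle_generic(CMD_CREATE_VCPU, 0);
        handle_generic(CMD_SET_TSS_ADDR, 0xfffbd000UL);
        return 0;
}

The same structure is what lets other architectures supply their own kvm_arch_vm_ioctl() without touching the generic dispatcher.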
diff --git a/drivers/kvm/x86.c b/drivers/kvm/x86.c
index 1fe209dd4caf..b84cb6707f78 100644
--- a/drivers/kvm/x86.c
+++ b/drivers/kvm/x86.c
@@ -300,6 +300,264 @@ out:
         return r;
 }
 
+static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
+{
+        int ret;
+
+        if (addr > (unsigned int)(-3 * PAGE_SIZE))
+                return -1;
+        ret = kvm_x86_ops->set_tss_addr(kvm, addr);
+        return ret;
+}
+
+static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
+                                         u32 kvm_nr_mmu_pages)
+{
+        if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
+                return -EINVAL;
+
+        mutex_lock(&kvm->lock);
+
+        kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
+        kvm->n_requested_mmu_pages = kvm_nr_mmu_pages;
+
+        mutex_unlock(&kvm->lock);
+        return 0;
+}
+
+static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
+{
+        return kvm->n_alloc_mmu_pages;
+}
+
+/*
+ * Set a new alias region.  Aliases map a portion of physical memory into
+ * another portion.  This is useful for memory windows, for example the PC
+ * VGA region.
+ */
+static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
+                                         struct kvm_memory_alias *alias)
+{
+        int r, n;
+        struct kvm_mem_alias *p;
+
+        r = -EINVAL;
+        /* General sanity checks */
+        if (alias->memory_size & (PAGE_SIZE - 1))
+                goto out;
+        if (alias->guest_phys_addr & (PAGE_SIZE - 1))
+                goto out;
+        if (alias->slot >= KVM_ALIAS_SLOTS)
+                goto out;
+        if (alias->guest_phys_addr + alias->memory_size
+            < alias->guest_phys_addr)
+                goto out;
+        if (alias->target_phys_addr + alias->memory_size
+            < alias->target_phys_addr)
+                goto out;
+
+        mutex_lock(&kvm->lock);
+
+        p = &kvm->aliases[alias->slot];
+        p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
+        p->npages = alias->memory_size >> PAGE_SHIFT;
+        p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;
+
+        for (n = KVM_ALIAS_SLOTS; n > 0; --n)
+                if (kvm->aliases[n - 1].npages)
+                        break;
+        kvm->naliases = n;
+
+        kvm_mmu_zap_all(kvm);
+
+        mutex_unlock(&kvm->lock);
+
+        return 0;
+
+out:
+        return r;
+}
+
+static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
+{
+        int r;
+
+        r = 0;
+        switch (chip->chip_id) {
+        case KVM_IRQCHIP_PIC_MASTER:
+                memcpy(&chip->chip.pic,
+                        &pic_irqchip(kvm)->pics[0],
+                        sizeof(struct kvm_pic_state));
+                break;
+        case KVM_IRQCHIP_PIC_SLAVE:
+                memcpy(&chip->chip.pic,
+                        &pic_irqchip(kvm)->pics[1],
+                        sizeof(struct kvm_pic_state));
+                break;
+        case KVM_IRQCHIP_IOAPIC:
+                memcpy(&chip->chip.ioapic,
+                        ioapic_irqchip(kvm),
+                        sizeof(struct kvm_ioapic_state));
+                break;
+        default:
+                r = -EINVAL;
+                break;
+        }
+        return r;
+}
+
+static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
+{
+        int r;
+
+        r = 0;
+        switch (chip->chip_id) {
+        case KVM_IRQCHIP_PIC_MASTER:
+                memcpy(&pic_irqchip(kvm)->pics[0],
+                        &chip->chip.pic,
+                        sizeof(struct kvm_pic_state));
+                break;
+        case KVM_IRQCHIP_PIC_SLAVE:
+                memcpy(&pic_irqchip(kvm)->pics[1],
+                        &chip->chip.pic,
+                        sizeof(struct kvm_pic_state));
+                break;
+        case KVM_IRQCHIP_IOAPIC:
+                memcpy(ioapic_irqchip(kvm),
+                        &chip->chip.ioapic,
+                        sizeof(struct kvm_ioapic_state));
+                break;
+        default:
+                r = -EINVAL;
+                break;
+        }
+        kvm_pic_update_irq(pic_irqchip(kvm));
+        return r;
+}
+
+long kvm_arch_vm_ioctl(struct file *filp,
+                       unsigned int ioctl, unsigned long arg)
+{
+        struct kvm *kvm = filp->private_data;
+        void __user *argp = (void __user *)arg;
+        int r = -EINVAL;
+
+        switch (ioctl) {
+        case KVM_SET_TSS_ADDR:
+                r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
+                if (r < 0)
+                        goto out;
+                break;
+        case KVM_SET_MEMORY_REGION: {
+                struct kvm_memory_region kvm_mem;
+                struct kvm_userspace_memory_region kvm_userspace_mem;
+
+                r = -EFAULT;
+                if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
+                        goto out;
+                kvm_userspace_mem.slot = kvm_mem.slot;
+                kvm_userspace_mem.flags = kvm_mem.flags;
+                kvm_userspace_mem.guest_phys_addr = kvm_mem.guest_phys_addr;
+                kvm_userspace_mem.memory_size = kvm_mem.memory_size;
+                r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 0);
+                if (r)
+                        goto out;
+                break;
+        }
+        case KVM_SET_NR_MMU_PAGES:
+                r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
+                if (r)
+                        goto out;
+                break;
+        case KVM_GET_NR_MMU_PAGES:
+                r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
+                break;
+        case KVM_SET_MEMORY_ALIAS: {
+                struct kvm_memory_alias alias;
+
+                r = -EFAULT;
+                if (copy_from_user(&alias, argp, sizeof alias))
+                        goto out;
+                r = kvm_vm_ioctl_set_memory_alias(kvm, &alias);
+                if (r)
+                        goto out;
+                break;
+        }
+        case KVM_CREATE_IRQCHIP:
+                r = -ENOMEM;
+                kvm->vpic = kvm_create_pic(kvm);
+                if (kvm->vpic) {
+                        r = kvm_ioapic_init(kvm);
+                        if (r) {
+                                kfree(kvm->vpic);
+                                kvm->vpic = NULL;
+                                goto out;
+                        }
+                } else
+                        goto out;
+                break;
+        case KVM_IRQ_LINE: {
+                struct kvm_irq_level irq_event;
+
+                r = -EFAULT;
+                if (copy_from_user(&irq_event, argp, sizeof irq_event))
+                        goto out;
+                if (irqchip_in_kernel(kvm)) {
+                        mutex_lock(&kvm->lock);
+                        if (irq_event.irq < 16)
+                                kvm_pic_set_irq(pic_irqchip(kvm),
+                                        irq_event.irq,
+                                        irq_event.level);
+                        kvm_ioapic_set_irq(kvm->vioapic,
+                                        irq_event.irq,
+                                        irq_event.level);
+                        mutex_unlock(&kvm->lock);
+                        r = 0;
+                }
+                break;
+        }
+        case KVM_GET_IRQCHIP: {
+                /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
+                struct kvm_irqchip chip;
+
+                r = -EFAULT;
+                if (copy_from_user(&chip, argp, sizeof chip))
+                        goto out;
+                r = -ENXIO;
+                if (!irqchip_in_kernel(kvm))
+                        goto out;
+                r = kvm_vm_ioctl_get_irqchip(kvm, &chip);
+                if (r)
+                        goto out;
+                r = -EFAULT;
+                if (copy_to_user(argp, &chip, sizeof chip))
+                        goto out;
+                r = 0;
+                break;
+        }
+        case KVM_SET_IRQCHIP: {
+                /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
+                struct kvm_irqchip chip;
+
+                r = -EFAULT;
+                if (copy_from_user(&chip, argp, sizeof chip))
+                        goto out;
+                r = -ENXIO;
+                if (!irqchip_in_kernel(kvm))
+                        goto out;
+                r = kvm_vm_ioctl_set_irqchip(kvm, &chip);
+                if (r)
+                        goto out;
+                r = 0;
+                break;
+        }
+        default:
+                ;
+        }
+out:
+        return r;
+}
+
 static __init void kvm_init_msr_list(void)
 {
         u32 dummy[2];
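
The ioctls that move into kvm_arch_vm_ioctl() above are still issued by userspace against the VM file descriptor exactly as before; only the in-kernel dispatch path changes. A minimal userspace sketch, assuming a kernel exposing /dev/kvm and <linux/kvm.h>, with error handling mostly trimmed:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

int main(void)
{
        int kvm = open("/dev/kvm", O_RDWR);
        int vm = ioctl(kvm, KVM_CREATE_VM, 0);

        if (kvm < 0 || vm < 0) {
                perror("kvm");
                return 1;
        }

        /* x86-only ioctl: now handled by kvm_arch_vm_ioctl() after this patch. */
        if (ioctl(vm, KVM_SET_TSS_ADDR, 0xfffbd000UL) < 0)
                perror("KVM_SET_TSS_ADDR");

        /* Generic ioctl: still handled directly in kvm_vm_ioctl(). */
        void *mem = mmap(NULL, 0x100000, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        struct kvm_userspace_memory_region region;
        memset(&region, 0, sizeof(region));
        region.slot = 0;
        region.guest_phys_addr = 0;
        region.memory_size = 0x100000;
        region.userspace_addr = (unsigned long)mem;
        if (ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region) < 0)
                perror("KVM_SET_USER_MEMORY_REGION");

        return 0;
}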