commit a4cc3889f7e2c3f2fd15b492822c889fed5e1800
tree d455a7392bc48aa44f61346e09b9cab63522d5b9
parent bb893d15b564f7711b60e0bc12966d049980582d
parent 95ef1e52922cf75b1ea2eae54ef886f2cc47eecb
author Linus Torvalds <torvalds@linux-foundation.org> 2011-11-20 17:57:43 -0500
committer Linus Torvalds <torvalds@linux-foundation.org> 2011-11-20 17:57:43 -0500

Merge branch 'kvm-updates/3.2' of git://git.kernel.org/pub/scm/virt/kvm/kvm
* 'kvm-updates/3.2' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
KVM guest: prevent tracing recursion with kvmclock
Revert "KVM: PPC: Add support for explicit HIOR setting"
KVM: VMX: Check for automatic switch msr table overflow
KVM: VMX: Add support for guest/host-only profiling
KVM: VMX: add support for switching of PERF_GLOBAL_CTRL
KVM: s390: announce SYNC_MMU
KVM: s390: Fix tprot locking
KVM: s390: handle SIGP sense running intercepts
KVM: s390: Fix RUNNING flag misinterpretation

-rw-r--r--  arch/powerpc/include/asm/kvm.h           8
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s.h    2
-rw-r--r--  arch/powerpc/kvm/book3s_pr.c            14
-rw-r--r--  arch/powerpc/kvm/powerpc.c               1
-rw-r--r--  arch/s390/include/asm/kvm_host.h         3
-rw-r--r--  arch/s390/kvm/diag.c                     2
-rw-r--r--  arch/s390/kvm/intercept.c                3
-rw-r--r--  arch/s390/kvm/interrupt.c                1
-rw-r--r--  arch/s390/kvm/kvm-s390.c                12
-rw-r--r--  arch/s390/kvm/priv.c                    10
-rw-r--r--  arch/s390/kvm/sigp.c                    45
-rw-r--r--  arch/x86/kernel/kvmclock.c               5
-rw-r--r--  arch/x86/kvm/vmx.c                     131
-rw-r--r--  include/linux/kvm.h                      1
14 files changed, 189 insertions, 49 deletions

diff --git a/arch/powerpc/include/asm/kvm.h b/arch/powerpc/include/asm/kvm.h
index 08fe69edcd10..0ad432bc81d6 100644
--- a/arch/powerpc/include/asm/kvm.h
+++ b/arch/powerpc/include/asm/kvm.h
@@ -149,12 +149,6 @@ struct kvm_regs {
 #define KVM_SREGS_E_UPDATE_DBSR (1 << 3)
 
 /*
- * Book3S special bits to indicate contents in the struct by maintaining
- * backwards compatibility with older structs. If adding a new field,
- * please make sure to add a flag for that new field */
-#define KVM_SREGS_S_HIOR (1 << 0)
-
-/*
  * In KVM_SET_SREGS, reserved/pad fields must be left untouched from a
  * previous KVM_GET_REGS.
  *
@@ -179,8 +173,6 @@ struct kvm_sregs {
                                __u64 ibat[8];
                                __u64 dbat[8];
                        } ppc32;
-                       __u64 flags; /* KVM_SREGS_S_ */
-                       __u64 hior;
                } s;
                struct {
                        union {
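
The two hunks above revert a flag-gated extension of struct kvm_sregs. For readers unfamiliar with the pattern the removed comment describes, here is a minimal user-space sketch of flag-gated struct extension, with every name invented for the example (none of this is KVM ABI): old userspace leaves the bit clear, so the kernel keeps its default, which is exactly the unconditional behaviour the revert restores.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical sketch of the reverted scheme: each optional field in a
 * userspace-visible struct gets a flag bit, so consumers that predate
 * the field (and therefore never set the bit) stay compatible. */
#define SREGS_HAS_HIOR (1ULL << 0)

struct example_sregs {
        uint64_t flags; /* which optional fields below are valid */
        uint64_t hior;  /* only meaningful if SREGS_HAS_HIOR is set */
};

static void apply_sregs(const struct example_sregs *s)
{
        if (s->flags & SREGS_HAS_HIOR)
                printf("userspace supplied hior = 0x%llx\n",
                       (unsigned long long)s->hior);
        else
                printf("hior not supplied, keep the PVR-derived default\n");
}

int main(void)
{
        struct example_sregs old_userspace = { .flags = 0 };
        struct example_sregs new_userspace = { .flags = SREGS_HAS_HIOR,
                                               .hior  = 0xfff00000 };

        apply_sregs(&old_userspace);
        apply_sregs(&new_userspace);
        return 0;
}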
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index a384ffdf33de..d4df013ad779 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -90,8 +90,6 @@ struct kvmppc_vcpu_book3s {
 #endif
         int context_id[SID_CONTEXTS];
 
-        bool hior_sregs;        /* HIOR is set by SREGS, not PVR */
-
         struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE];
         struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
         struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index bc4d50dec78b..3c791e1eb675 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -151,16 +151,14 @@ void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
 #ifdef CONFIG_PPC_BOOK3S_64
         if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
                 kvmppc_mmu_book3s_64_init(vcpu);
-                if (!to_book3s(vcpu)->hior_sregs)
-                        to_book3s(vcpu)->hior = 0xfff00000;
+                to_book3s(vcpu)->hior = 0xfff00000;
                 to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
                 vcpu->arch.cpu_type = KVM_CPU_3S_64;
         } else
 #endif
         {
                 kvmppc_mmu_book3s_32_init(vcpu);
-                if (!to_book3s(vcpu)->hior_sregs)
-                        to_book3s(vcpu)->hior = 0;
+                to_book3s(vcpu)->hior = 0;
                 to_book3s(vcpu)->msr_mask = 0xffffffffULL;
                 vcpu->arch.cpu_type = KVM_CPU_3S_32;
         }
@@ -797,9 +795,6 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                 }
         }
 
-        if (sregs->u.s.flags & KVM_SREGS_S_HIOR)
-                sregs->u.s.hior = to_book3s(vcpu)->hior;
-
         return 0;
 }
 
@@ -836,11 +831,6 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
         /* Flush the MMU after messing with the segments */
         kvmppc_mmu_pte_flush(vcpu, 0, 0);
 
-        if (sregs->u.s.flags & KVM_SREGS_S_HIOR) {
-                to_book3s(vcpu)->hior_sregs = true;
-                to_book3s(vcpu)->hior = sregs->u.s.hior;
-        }
-
         return 0;
 }
 
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index efbf9ad87203..607fbdf24b84 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -208,7 +208,6 @@ int kvm_dev_ioctl_check_extension(long ext)
         case KVM_CAP_PPC_BOOKE_SREGS:
 #else
         case KVM_CAP_PPC_SEGSTATE:
-        case KVM_CAP_PPC_HIOR:
         case KVM_CAP_PPC_PAPR:
 #endif
         case KVM_CAP_PPC_UNSET_IRQ:
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 24e18473d926..b0c235cb6ad5 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -47,7 +47,7 @@ struct sca_block {
 #define KVM_HPAGE_MASK(x)       (~(KVM_HPAGE_SIZE(x) - 1))
 #define KVM_PAGES_PER_HPAGE(x)  (KVM_HPAGE_SIZE(x) / PAGE_SIZE)
 
-#define CPUSTAT_HOST       0x80000000
+#define CPUSTAT_STOPPED    0x80000000
 #define CPUSTAT_WAIT       0x10000000
 #define CPUSTAT_ECALL_PEND 0x08000000
 #define CPUSTAT_STOP_INT   0x04000000
@@ -139,6 +139,7 @@ struct kvm_vcpu_stat {
         u32 instruction_stfl;
         u32 instruction_tprot;
         u32 instruction_sigp_sense;
+        u32 instruction_sigp_sense_running;
         u32 instruction_sigp_external_call;
         u32 instruction_sigp_emergency;
         u32 instruction_sigp_stop;
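
The CPUSTAT_HOST slot becomes an explicit CPUSTAT_STOPPED bit because, as the "RUNNING flag misinterpretation" fix below makes clear, "stopped" (the architected stopped state) and "running" (currently backed by a host cpu) are independent questions. A toy user-space model of the new semantics; the CPUSTAT_RUNNING value is illustrative only and the transitions are simplified:

#include <stdio.h>

#define CPUSTAT_STOPPED 0x80000000u     /* value from the header above */
#define CPUSTAT_RUNNING 0x00000800u     /* illustrative value only */

static unsigned int cpuflags = CPUSTAT_STOPPED; /* vcpus now start stopped */

/* SIGP SENSE, SET PREFIX and set-initial-psw ask "is it stopped?" ... */
static int is_stopped(void)
{
        return !!(cpuflags & CPUSTAT_STOPPED);
}

/* ... while SIGP SENSE RUNNING asks "is it on a host cpu right now?",
 * which is the distinction the old single-flag code blurred. */
static int is_running(void)
{
        return !!(cpuflags & CPUSTAT_RUNNING);
}

int main(void)
{
        printf("fresh vcpu: stopped=%d running=%d\n", is_stopped(), is_running());
        cpuflags |= CPUSTAT_RUNNING;    /* vcpu_load: scheduled onto a host cpu */
        printf("loaded, not started: stopped=%d running=%d\n",
               is_stopped(), is_running());
        cpuflags &= ~CPUSTAT_STOPPED;   /* KVM_RUN or restart interrupt */
        printf("started: stopped=%d running=%d\n", is_stopped(), is_running());
        return 0;
}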
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index 87cedd61be04..8943e82cd4d9 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -70,7 +70,7 @@ static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
                 return -EOPNOTSUPP;
         }
 
-        atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
+        atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
         vcpu->run->s390_reset_flags |= KVM_S390_RESET_SUBSYSTEM;
         vcpu->run->s390_reset_flags |= KVM_S390_RESET_IPL;
         vcpu->run->s390_reset_flags |= KVM_S390_RESET_CPU_INIT;
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index c7c51898984e..02434543eabb 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -132,7 +132,6 @@ static int handle_stop(struct kvm_vcpu *vcpu)
         int rc = 0;
 
         vcpu->stat.exit_stop_request++;
-        atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
         spin_lock_bh(&vcpu->arch.local_int.lock);
         if (vcpu->arch.local_int.action_bits & ACTION_STORE_ON_STOP) {
                 vcpu->arch.local_int.action_bits &= ~ACTION_STORE_ON_STOP;
@@ -149,6 +148,8 @@ static int handle_stop(struct kvm_vcpu *vcpu)
         }
 
         if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) {
+                atomic_set_mask(CPUSTAT_STOPPED,
+                                &vcpu->arch.sie_block->cpuflags);
                 vcpu->arch.local_int.action_bits &= ~ACTION_STOP_ON_STOP;
                 VCPU_EVENT(vcpu, 3, "%s", "cpu stopped");
                 rc = -EOPNOTSUPP;
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 87c16705b381..278ee009ce65 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -252,6 +252,7 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
                         offsetof(struct _lowcore, restart_psw), sizeof(psw_t));
                 if (rc == -EFAULT)
                         exception = 1;
+                atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
                 break;
 
         case KVM_S390_PROGRAM_INT:
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 0bd3bea1e4cd..d1c445732451 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -65,6 +65,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
         { "instruction_stfl", VCPU_STAT(instruction_stfl) },
         { "instruction_tprot", VCPU_STAT(instruction_tprot) },
         { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
+        { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
         { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
         { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
         { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
@@ -127,6 +128,7 @@ int kvm_dev_ioctl_check_extension(long ext)
         switch (ext) {
         case KVM_CAP_S390_PSW:
         case KVM_CAP_S390_GMAP:
+        case KVM_CAP_SYNC_MMU:
                 r = 1;
                 break;
         default:
@@ -270,10 +272,12 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
         restore_fp_regs(&vcpu->arch.guest_fpregs);
         restore_access_regs(vcpu->arch.guest_acrs);
         gmap_enable(vcpu->arch.gmap);
+        atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
+        atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
         gmap_disable(vcpu->arch.gmap);
         save_fp_regs(&vcpu->arch.guest_fpregs);
         save_access_regs(vcpu->arch.guest_acrs);
@@ -301,7 +305,9 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
 
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
-        atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH | CPUSTAT_SM);
+        atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
+                                                    CPUSTAT_SM |
+                                                    CPUSTAT_STOPPED);
         vcpu->arch.sie_block->ecb = 6;
         vcpu->arch.sie_block->eca = 0xC1002001U;
         vcpu->arch.sie_block->fac = (int) (long) facilities;
@@ -428,7 +434,7 @@ static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
 {
         int rc = 0;
 
-        if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
+        if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
                 rc = -EBUSY;
         else {
                 vcpu->run->psw_mask = psw.mask;
@@ -501,7 +507,7 @@ rerun_vcpu:
         if (vcpu->sigset_active)
                 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
 
-        atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
+        atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
 
         BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);
 
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 391626361084..d02638959922 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -336,6 +336,7 @@ static int handle_tprot(struct kvm_vcpu *vcpu)
         u64 address1 = disp1 + base1 ? vcpu->arch.guest_gprs[base1] : 0;
         u64 address2 = disp2 + base2 ? vcpu->arch.guest_gprs[base2] : 0;
         struct vm_area_struct *vma;
+        unsigned long user_address;
 
         vcpu->stat.instruction_tprot++;
 
@@ -349,9 +350,14 @@ static int handle_tprot(struct kvm_vcpu *vcpu)
                 return -EOPNOTSUPP;
 
 
+        /* we must resolve the address without holding the mmap semaphore.
+         * This is ok since the userspace hypervisor is not supposed to change
+         * the mapping while the guest queries the memory. Otherwise the guest
+         * might crash or get wrong info anyway. */
+        user_address = (unsigned long) __guestaddr_to_user(vcpu, address1);
+
         down_read(&current->mm->mmap_sem);
-        vma = find_vma(current->mm,
-                (unsigned long) __guestaddr_to_user(vcpu, address1));
+        vma = find_vma(current->mm, user_address);
         if (!vma) {
                 up_read(&current->mm->mmap_sem);
                 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
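
The added comment explains the locking fix: the guest-to-user translation is performed before mmap_sem is taken, so nothing that might sleep or take other locks nests inside the semaphore. A hedged user-space analog of that "compute outside the critical section" pattern follows; all names are invented and pthreads stands in for the kernel primitives:

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t maps_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Stand-in for __guestaddr_to_user(): potentially expensive or
 * blocking, so it must not run under maps_lock. */
static unsigned long translate(unsigned long guest_addr)
{
        return guest_addr + 0x1000;
}

static int query_mapping(unsigned long guest_addr)
{
        unsigned long user_address = translate(guest_addr); /* outside the lock */
        int found;

        pthread_rwlock_rdlock(&maps_lock);
        found = (user_address != 0);    /* stand-in for find_vma() */
        pthread_rwlock_unlock(&maps_lock);
        return found;
}

int main(void)
{
        printf("mapping found: %d\n", query_mapping(0x2000));
        return 0;
}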
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index f815118835f3..0a7941d74bc6 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -31,9 +31,11 @@
 #define SIGP_SET_PREFIX        0x0d
 #define SIGP_STORE_STATUS_ADDR 0x0e
 #define SIGP_SET_ARCH          0x12
+#define SIGP_SENSE_RUNNING     0x15
 
 /* cpu status bits */
 #define SIGP_STAT_EQUIPMENT_CHECK   0x80000000UL
+#define SIGP_STAT_NOT_RUNNING       0x00000400UL
 #define SIGP_STAT_INCORRECT_STATE   0x00000200UL
 #define SIGP_STAT_INVALID_PARAMETER 0x00000100UL
 #define SIGP_STAT_EXT_CALL_PENDING  0x00000080UL
@@ -57,8 +59,8 @@ static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
         spin_lock(&fi->lock);
         if (fi->local_int[cpu_addr] == NULL)
                 rc = 3; /* not operational */
-        else if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
-                 & CPUSTAT_RUNNING) {
+        else if (!(atomic_read(fi->local_int[cpu_addr]->cpuflags)
+                   & CPUSTAT_STOPPED)) {
                 *reg &= 0xffffffff00000000UL;
                 rc = 1; /* status stored */
         } else {
@@ -251,7 +253,7 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
 
         spin_lock_bh(&li->lock);
         /* cpu must be in stopped state */
-        if (atomic_read(li->cpuflags) & CPUSTAT_RUNNING) {
+        if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
                 rc = 1; /* incorrect state */
                 *reg &= SIGP_STAT_INCORRECT_STATE;
                 kfree(inti);
@@ -275,6 +277,38 @@ out_fi:
         return rc;
 }
 
+static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr,
+                                unsigned long *reg)
+{
+        int rc;
+        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
+
+        if (cpu_addr >= KVM_MAX_VCPUS)
+                return 3; /* not operational */
+
+        spin_lock(&fi->lock);
+        if (fi->local_int[cpu_addr] == NULL)
+                rc = 3; /* not operational */
+        else {
+                if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
+                    & CPUSTAT_RUNNING) {
+                        /* running */
+                        rc = 1;
+                } else {
+                        /* not running */
+                        *reg &= 0xffffffff00000000UL;
+                        *reg |= SIGP_STAT_NOT_RUNNING;
+                        rc = 0;
+                }
+        }
+        spin_unlock(&fi->lock);
+
+        VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x", cpu_addr,
+                   rc);
+
+        return rc;
+}
+
 int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
 {
         int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
@@ -331,6 +365,11 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
                 rc = __sigp_set_prefix(vcpu, cpu_addr, parameter,
                                        &vcpu->arch.guest_gprs[r1]);
                 break;
+        case SIGP_SENSE_RUNNING:
+                vcpu->stat.instruction_sigp_sense_running++;
+                rc = __sigp_sense_running(vcpu, cpu_addr,
+                                          &vcpu->arch.guest_gprs[r1]);
+                break;
         case SIGP_RESTART:
                 vcpu->stat.instruction_sigp_restart++;
                 /* user space must know about restart */
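
The new __sigp_sense_running() maps the target's cpuflags to a SIGP condition code. A compact user-space model of this version of the handler (3 = not operational, 1 = running, 0 = status stored with the not-running bit); the array stands in for fi->local_int[], locking is omitted, and the CPUSTAT_RUNNING value is illustrative:

#include <stdio.h>

#define SIGP_STAT_NOT_RUNNING 0x00000400UL
#define CPUSTAT_RUNNING       0x00000800u   /* illustrative value */
#define MAX_VCPUS 4

static unsigned int *vcpus[MAX_VCPUS];      /* NULL = not operational */

static int sense_running(unsigned int cpu_addr, unsigned long *reg)
{
        if (cpu_addr >= MAX_VCPUS || !vcpus[cpu_addr])
                return 3;                   /* not operational */
        if (*vcpus[cpu_addr] & CPUSTAT_RUNNING)
                return 1;                   /* running */
        *reg &= 0xffffffff00000000UL;       /* assumes 64-bit longs, as on s390x */
        *reg |= SIGP_STAT_NOT_RUNNING;
        return 0;                           /* status stored */
}

int main(void)
{
        unsigned int stopped = 0, running = CPUSTAT_RUNNING;
        unsigned long reg = 0;

        vcpus[0] = &running;
        vcpus[1] = &stopped;
        printf("cpu 0: cc=%d\n", sense_running(0, &reg));
        printf("cpu 1: cc=%d reg=%#lx\n", sense_running(1, &reg), reg);
        printf("cpu 3: cc=%d\n", sense_running(3, &reg));
        return 0;
}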
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index c1a0188e29ae..44842d756b29 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -74,9 +74,10 @@ static cycle_t kvm_clock_read(void)
         struct pvclock_vcpu_time_info *src;
         cycle_t ret;
 
-        src = &get_cpu_var(hv_clock);
+        preempt_disable_notrace();
+        src = &__get_cpu_var(hv_clock);
         ret = pvclock_clocksource_read(src);
-        put_cpu_var(hv_clock);
+        preempt_enable_notrace();
         return ret;
 }
 
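
get_cpu_var()/put_cpu_var() disable and re-enable preemption through helpers the function tracer can instrument, so a tracer that timestamps its events via kvmclock can recurse into itself; the _notrace variants break that cycle. A rough user-space analogy built on gcc's -finstrument-functions (an illustration of the recursion hazard, not kernel code; compile with "gcc -finstrument-functions demo.c"):

#include <stdio.h>
#include <time.h>

/* The clock used by the profiling hook is excluded from
 * instrumentation, playing the role of preempt_*_notrace(): if it
 * were instrumented, every call would re-enter the hook forever. */
__attribute__((no_instrument_function))
static long clock_read(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_nsec;
}

__attribute__((no_instrument_function))
void __cyg_profile_func_enter(void *fn, void *caller)
{
        (void)fn; (void)caller;
        (void)clock_read();     /* safe: clock_read() is not instrumented */
}

__attribute__((no_instrument_function))
void __cyg_profile_func_exit(void *fn, void *caller)
{
        (void)fn; (void)caller;
}

static void traced_work(void) { }   /* instrumented, triggers the hook */

int main(void)
{
        traced_work();
        printf("clock: %ld\n", clock_read());
        return 0;
}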
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index a0d6bd9ad442..579a0b51696a 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -39,6 +39,7 @@
 #include <asm/mce.h>
 #include <asm/i387.h>
 #include <asm/xcr.h>
+#include <asm/perf_event.h>
 
 #include "trace.h"
 
@@ -118,7 +119,7 @@ module_param(ple_gap, int, S_IRUGO);
 static int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
 module_param(ple_window, int, S_IRUGO);
 
-#define NR_AUTOLOAD_MSRS 1
+#define NR_AUTOLOAD_MSRS 8
 #define VMCS02_POOL_SIZE 1
 
 struct vmcs {
@@ -622,6 +623,7 @@ static unsigned long *vmx_msr_bitmap_legacy;
 static unsigned long *vmx_msr_bitmap_longmode;
 
 static bool cpu_has_load_ia32_efer;
+static bool cpu_has_load_perf_global_ctrl;
 
 static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
 static DEFINE_SPINLOCK(vmx_vpid_lock);
@@ -1191,15 +1193,34 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
         vmcs_write32(EXCEPTION_BITMAP, eb);
 }
 
+static void clear_atomic_switch_msr_special(unsigned long entry,
+                unsigned long exit)
+{
+        vmcs_clear_bits(VM_ENTRY_CONTROLS, entry);
+        vmcs_clear_bits(VM_EXIT_CONTROLS, exit);
+}
+
 static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
 {
         unsigned i;
         struct msr_autoload *m = &vmx->msr_autoload;
 
-        if (msr == MSR_EFER && cpu_has_load_ia32_efer) {
-                vmcs_clear_bits(VM_ENTRY_CONTROLS, VM_ENTRY_LOAD_IA32_EFER);
-                vmcs_clear_bits(VM_EXIT_CONTROLS, VM_EXIT_LOAD_IA32_EFER);
-                return;
+        switch (msr) {
+        case MSR_EFER:
+                if (cpu_has_load_ia32_efer) {
+                        clear_atomic_switch_msr_special(VM_ENTRY_LOAD_IA32_EFER,
+                                        VM_EXIT_LOAD_IA32_EFER);
+                        return;
+                }
+                break;
+        case MSR_CORE_PERF_GLOBAL_CTRL:
+                if (cpu_has_load_perf_global_ctrl) {
+                        clear_atomic_switch_msr_special(
+                                        VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
+                                        VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
+                        return;
+                }
+                break;
         }
 
         for (i = 0; i < m->nr; ++i)
@@ -1215,25 +1236,55 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
         vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
 }
 
+static void add_atomic_switch_msr_special(unsigned long entry,
+                unsigned long exit, unsigned long guest_val_vmcs,
+                unsigned long host_val_vmcs, u64 guest_val, u64 host_val)
+{
+        vmcs_write64(guest_val_vmcs, guest_val);
+        vmcs_write64(host_val_vmcs, host_val);
+        vmcs_set_bits(VM_ENTRY_CONTROLS, entry);
+        vmcs_set_bits(VM_EXIT_CONTROLS, exit);
+}
+
 static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
                                   u64 guest_val, u64 host_val)
 {
         unsigned i;
         struct msr_autoload *m = &vmx->msr_autoload;
 
-        if (msr == MSR_EFER && cpu_has_load_ia32_efer) {
-                vmcs_write64(GUEST_IA32_EFER, guest_val);
-                vmcs_write64(HOST_IA32_EFER, host_val);
-                vmcs_set_bits(VM_ENTRY_CONTROLS, VM_ENTRY_LOAD_IA32_EFER);
-                vmcs_set_bits(VM_EXIT_CONTROLS, VM_EXIT_LOAD_IA32_EFER);
-                return;
+        switch (msr) {
+        case MSR_EFER:
+                if (cpu_has_load_ia32_efer) {
+                        add_atomic_switch_msr_special(VM_ENTRY_LOAD_IA32_EFER,
+                                        VM_EXIT_LOAD_IA32_EFER,
+                                        GUEST_IA32_EFER,
+                                        HOST_IA32_EFER,
+                                        guest_val, host_val);
+                        return;
+                }
+                break;
+        case MSR_CORE_PERF_GLOBAL_CTRL:
+                if (cpu_has_load_perf_global_ctrl) {
+                        add_atomic_switch_msr_special(
+                                        VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
+                                        VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
+                                        GUEST_IA32_PERF_GLOBAL_CTRL,
+                                        HOST_IA32_PERF_GLOBAL_CTRL,
+                                        guest_val, host_val);
+                        return;
+                }
+                break;
         }
 
         for (i = 0; i < m->nr; ++i)
                 if (m->guest[i].index == msr)
                         break;
 
-        if (i == m->nr) {
+        if (i == NR_AUTOLOAD_MSRS) {
+                printk_once(KERN_WARNING"Not enough mst switch entries. "
+                                "Can't add msr %x\n", msr);
+                return;
+        } else if (i == m->nr) {
                 ++m->nr;
                 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
                 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
@@ -2455,6 +2506,42 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
             && allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
                                VM_EXIT_LOAD_IA32_EFER);
 
+        cpu_has_load_perf_global_ctrl =
+                allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS,
+                                VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
+                && allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
+                                   VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
+
+        /*
+         * Some cpus support VM_ENTRY_(LOAD|SAVE)_IA32_PERF_GLOBAL_CTRL
+         * but due to arrata below it can't be used. Workaround is to use
+         * msr load mechanism to switch IA32_PERF_GLOBAL_CTRL.
+         *
+         * VM Exit May Incorrectly Clear IA32_PERF_GLOBAL_CTRL [34:32]
+         *
+         * AAK155             (model 26)
+         * AAP115             (model 30)
+         * AAT100             (model 37)
+         * BC86,AAY89,BD102   (model 44)
+         * BA97               (model 46)
+         *
+         */
+        if (cpu_has_load_perf_global_ctrl && boot_cpu_data.x86 == 0x6) {
+                switch (boot_cpu_data.x86_model) {
+                case 26:
+                case 30:
+                case 37:
+                case 44:
+                case 46:
+                        cpu_has_load_perf_global_ctrl = false;
+                        printk_once(KERN_WARNING"kvm: VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL "
+                                        "does not work properly. Using workaround\n");
+                        break;
+                default:
+                        break;
+                }
+        }
+
         return 0;
 }
 
@@ -5968,6 +6055,24 @@ static void vmx_cancel_injection(struct kvm_vcpu *vcpu)
         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
 }
 
+static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
+{
+        int i, nr_msrs;
+        struct perf_guest_switch_msr *msrs;
+
+        msrs = perf_guest_get_msrs(&nr_msrs);
+
+        if (!msrs)
+                return;
+
+        for (i = 0; i < nr_msrs; i++)
+                if (msrs[i].host == msrs[i].guest)
+                        clear_atomic_switch_msr(vmx, msrs[i].msr);
+                else
+                        add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest,
+                                        msrs[i].host);
+}
+
 #ifdef CONFIG_X86_64
 #define R "r"
 #define Q "q"
@@ -6017,6 +6122,8 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
         if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
                 vmx_set_interrupt_shadow(vcpu, 0);
 
+        atomic_switch_perf_msrs(vmx);
+
         vmx->__launched = vmx->loaded_vmcs->launched;
         asm(
                 /* Store host registers */
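
With IA32_PERF_GLOBAL_CTRL now switched through the MSR autoload table, the table can actually fill up, which is what the new i == NR_AUTOLOAD_MSRS check guards against. A minimal user-space model of the bounded-table logic in add_atomic_switch_msr() (simplified sketch; the kernel additionally mirrors m->nr into the VMCS load counts and keeps a separate host array):

#include <stdint.h>
#include <stdio.h>

#define NR_AUTOLOAD_MSRS 8

struct msr_entry { uint32_t index; uint64_t value; };

static struct msr_entry guest[NR_AUTOLOAD_MSRS];
static unsigned int nr;

static void add_autoload_msr(uint32_t msr, uint64_t guest_val)
{
        unsigned int i;

        /* reuse an existing slot for this MSR if there is one */
        for (i = 0; i < nr; i++)
                if (guest[i].index == msr)
                        break;

        if (i == NR_AUTOLOAD_MSRS) {
                /* table full and MSR not present: refuse with a warning
                 * (printk_once in the kernel) */
                fprintf(stderr, "not enough msr switch entries, can't add msr %#x\n",
                        msr);
                return;
        } else if (i == nr) {
                nr++;   /* append a fresh slot */
        }
        guest[i].index = msr;
        guest[i].value = guest_val;
}

int main(void)
{
        for (uint32_t m = 0; m < 10; m++)
                add_autoload_msr(0xc0000080u + m, m);
        printf("entries in use: %u of %d\n", nr, NR_AUTOLOAD_MSRS);
        return 0;
}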
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index f47fcd30273d..c3892fc1d538 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -555,7 +555,6 @@ struct kvm_ppc_pvinfo {
 #define KVM_CAP_PPC_SMT 64
 #define KVM_CAP_PPC_RMA 65
 #define KVM_CAP_MAX_VCPUS 66       /* returns max vcpus per vm */
-#define KVM_CAP_PPC_HIOR 67
 #define KVM_CAP_PPC_PAPR 68
 #define KVM_CAP_S390_GMAP 71
 
