diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-07-24 15:01:20 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-07-24 15:01:20 -0400 |
commit | 5fecc9d8f59e765c2a48379dd7c6f5cf88c7d75a (patch) | |
tree | d1fc25d9650d3ac24591bba6f5e2e7a1afc54796 /arch/s390/kvm | |
parent | 3c4cfadef6a1665d9cd02a543782d03d3e6740c6 (diff) | |
parent | 1a577b72475d161b6677c05abe57301362023bb2 (diff) |
Merge tag 'kvm-3.6-1' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM updates from Avi Kivity:
"Highlights include
- full big real mode emulation on pre-Westmere Intel hosts (can be
disabled with emulate_invalid_guest_state=0)
- relatively small ppc and s390 updates
- PCID/INVPCID support in guests
- EOI avoidance; 3.6 guests should perform better on 3.6 hosts on
interrupt intensive workloads
- Lockless write faults during live migration
- EPT accessed/dirty bits support for new Intel processors"
Fix up conflicts in:
- Documentation/virtual/kvm/api.txt:
Stupid subchapter numbering, added next to each other.
- arch/powerpc/kvm/booke_interrupts.S:
PPC asm changes clashing with the KVM fixes
- arch/s390/include/asm/sigp.h, arch/s390/kvm/sigp.c:
Duplicated commits through the kvm tree and the s390 tree, with
subsequent edits in the KVM tree.
* tag 'kvm-3.6-1' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (93 commits)
KVM: fix race with level interrupts
x86, hyper: fix build with !CONFIG_KVM_GUEST
Revert "apic: fix kvm build on UP without IOAPIC"
KVM guest: switch to apic_set_eoi_write, apic_write
apic: add apic_set_eoi_write for PV use
KVM: VMX: Implement PCID/INVPCID for guests with EPT
KVM: Add x86_hyper_kvm to complete detect_hypervisor_platform check
KVM: PPC: Critical interrupt emulation support
KVM: PPC: e500mc: Fix tlbilx emulation for 64-bit guests
KVM: PPC64: booke: Set interrupt computation mode for 64-bit host
KVM: PPC: bookehv: Add ESR flag to Data Storage Interrupt
KVM: PPC: bookehv64: Add support for std/ld emulation.
booke: Added crit/mc exception handler for e500v2
booke/bookehv: Add host crit-watchdog exception support
KVM: MMU: document mmu-lock and fast page fault
KVM: MMU: fix kvm_mmu_pagetable_walk tracepoint
KVM: MMU: trace fast page fault
KVM: MMU: fast path of handling guest page fault
KVM: MMU: introduce SPTE_MMU_WRITEABLE bit
KVM: MMU: fold tlb flush judgement into mmu_spte_update
...
Diffstat (limited to 'arch/s390/kvm')
-rw-r--r-- | arch/s390/kvm/kvm-s390.c | 1 | ||||
-rw-r--r-- | arch/s390/kvm/sigp.c | 77 |
2 files changed, 43 insertions, 35 deletions
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index c552d1f4103f..d470ccbfabae 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c | |||
@@ -347,6 +347,7 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu) | |||
347 | vcpu->arch.guest_fpregs.fpc = 0; | 347 | vcpu->arch.guest_fpregs.fpc = 0; |
348 | asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc)); | 348 | asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc)); |
349 | vcpu->arch.sie_block->gbea = 1; | 349 | vcpu->arch.sie_block->gbea = 1; |
350 | atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); | ||
350 | } | 351 | } |
351 | 352 | ||
352 | int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | 353 | int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) |
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c index 1ab2ce1611c5..56f80e1f98f7 100644 --- a/arch/s390/kvm/sigp.c +++ b/arch/s390/kvm/sigp.c | |||
@@ -26,19 +26,23 @@ static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr, | |||
26 | int rc; | 26 | int rc; |
27 | 27 | ||
28 | if (cpu_addr >= KVM_MAX_VCPUS) | 28 | if (cpu_addr >= KVM_MAX_VCPUS) |
29 | return 3; /* not operational */ | 29 | return SIGP_CC_NOT_OPERATIONAL; |
30 | 30 | ||
31 | spin_lock(&fi->lock); | 31 | spin_lock(&fi->lock); |
32 | if (fi->local_int[cpu_addr] == NULL) | 32 | if (fi->local_int[cpu_addr] == NULL) |
33 | rc = 3; /* not operational */ | 33 | rc = SIGP_CC_NOT_OPERATIONAL; |
34 | else if (!(atomic_read(fi->local_int[cpu_addr]->cpuflags) | 34 | else if (!(atomic_read(fi->local_int[cpu_addr]->cpuflags) |
35 | & CPUSTAT_STOPPED)) { | 35 | & (CPUSTAT_ECALL_PEND | CPUSTAT_STOPPED))) |
36 | *reg &= 0xffffffff00000000UL; | 36 | rc = SIGP_CC_ORDER_CODE_ACCEPTED; |
37 | rc = 1; /* status stored */ | 37 | else { |
38 | } else { | ||
39 | *reg &= 0xffffffff00000000UL; | 38 | *reg &= 0xffffffff00000000UL; |
40 | *reg |= SIGP_STATUS_STOPPED; | 39 | if (atomic_read(fi->local_int[cpu_addr]->cpuflags) |
41 | rc = 1; /* status stored */ | 40 | & CPUSTAT_ECALL_PEND) |
41 | *reg |= SIGP_STATUS_EXT_CALL_PENDING; | ||
42 | if (atomic_read(fi->local_int[cpu_addr]->cpuflags) | ||
43 | & CPUSTAT_STOPPED) | ||
44 | *reg |= SIGP_STATUS_STOPPED; | ||
45 | rc = SIGP_CC_STATUS_STORED; | ||
42 | } | 46 | } |
43 | spin_unlock(&fi->lock); | 47 | spin_unlock(&fi->lock); |
44 | 48 | ||
@@ -54,7 +58,7 @@ static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr) | |||
54 | int rc; | 58 | int rc; |
55 | 59 | ||
56 | if (cpu_addr >= KVM_MAX_VCPUS) | 60 | if (cpu_addr >= KVM_MAX_VCPUS) |
57 | return 3; /* not operational */ | 61 | return SIGP_CC_NOT_OPERATIONAL; |
58 | 62 | ||
59 | inti = kzalloc(sizeof(*inti), GFP_KERNEL); | 63 | inti = kzalloc(sizeof(*inti), GFP_KERNEL); |
60 | if (!inti) | 64 | if (!inti) |
@@ -66,7 +70,7 @@ static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr) | |||
66 | spin_lock(&fi->lock); | 70 | spin_lock(&fi->lock); |
67 | li = fi->local_int[cpu_addr]; | 71 | li = fi->local_int[cpu_addr]; |
68 | if (li == NULL) { | 72 | if (li == NULL) { |
69 | rc = 3; /* not operational */ | 73 | rc = SIGP_CC_NOT_OPERATIONAL; |
70 | kfree(inti); | 74 | kfree(inti); |
71 | goto unlock; | 75 | goto unlock; |
72 | } | 76 | } |
@@ -77,7 +81,7 @@ static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr) | |||
77 | if (waitqueue_active(&li->wq)) | 81 | if (waitqueue_active(&li->wq)) |
78 | wake_up_interruptible(&li->wq); | 82 | wake_up_interruptible(&li->wq); |
79 | spin_unlock_bh(&li->lock); | 83 | spin_unlock_bh(&li->lock); |
80 | rc = 0; /* order accepted */ | 84 | rc = SIGP_CC_ORDER_CODE_ACCEPTED; |
81 | VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr); | 85 | VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr); |
82 | unlock: | 86 | unlock: |
83 | spin_unlock(&fi->lock); | 87 | spin_unlock(&fi->lock); |
@@ -92,7 +96,7 @@ static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr) | |||
92 | int rc; | 96 | int rc; |
93 | 97 | ||
94 | if (cpu_addr >= KVM_MAX_VCPUS) | 98 | if (cpu_addr >= KVM_MAX_VCPUS) |
95 | return 3; /* not operational */ | 99 | return SIGP_CC_NOT_OPERATIONAL; |
96 | 100 | ||
97 | inti = kzalloc(sizeof(*inti), GFP_KERNEL); | 101 | inti = kzalloc(sizeof(*inti), GFP_KERNEL); |
98 | if (!inti) | 102 | if (!inti) |
@@ -104,7 +108,7 @@ static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr) | |||
104 | spin_lock(&fi->lock); | 108 | spin_lock(&fi->lock); |
105 | li = fi->local_int[cpu_addr]; | 109 | li = fi->local_int[cpu_addr]; |
106 | if (li == NULL) { | 110 | if (li == NULL) { |
107 | rc = 3; /* not operational */ | 111 | rc = SIGP_CC_NOT_OPERATIONAL; |
108 | kfree(inti); | 112 | kfree(inti); |
109 | goto unlock; | 113 | goto unlock; |
110 | } | 114 | } |
@@ -115,7 +119,7 @@ static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr) | |||
115 | if (waitqueue_active(&li->wq)) | 119 | if (waitqueue_active(&li->wq)) |
116 | wake_up_interruptible(&li->wq); | 120 | wake_up_interruptible(&li->wq); |
117 | spin_unlock_bh(&li->lock); | 121 | spin_unlock_bh(&li->lock); |
118 | rc = 0; /* order accepted */ | 122 | rc = SIGP_CC_ORDER_CODE_ACCEPTED; |
119 | VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr); | 123 | VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr); |
120 | unlock: | 124 | unlock: |
121 | spin_unlock(&fi->lock); | 125 | spin_unlock(&fi->lock); |
@@ -143,7 +147,7 @@ static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action) | |||
143 | out: | 147 | out: |
144 | spin_unlock_bh(&li->lock); | 148 | spin_unlock_bh(&li->lock); |
145 | 149 | ||
146 | return 0; /* order accepted */ | 150 | return SIGP_CC_ORDER_CODE_ACCEPTED; |
147 | } | 151 | } |
148 | 152 | ||
149 | static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action) | 153 | static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action) |
@@ -153,12 +157,12 @@ static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action) | |||
153 | int rc; | 157 | int rc; |
154 | 158 | ||
155 | if (cpu_addr >= KVM_MAX_VCPUS) | 159 | if (cpu_addr >= KVM_MAX_VCPUS) |
156 | return 3; /* not operational */ | 160 | return SIGP_CC_NOT_OPERATIONAL; |
157 | 161 | ||
158 | spin_lock(&fi->lock); | 162 | spin_lock(&fi->lock); |
159 | li = fi->local_int[cpu_addr]; | 163 | li = fi->local_int[cpu_addr]; |
160 | if (li == NULL) { | 164 | if (li == NULL) { |
161 | rc = 3; /* not operational */ | 165 | rc = SIGP_CC_NOT_OPERATIONAL; |
162 | goto unlock; | 166 | goto unlock; |
163 | } | 167 | } |
164 | 168 | ||
@@ -182,11 +186,11 @@ static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter) | |||
182 | 186 | ||
183 | switch (parameter & 0xff) { | 187 | switch (parameter & 0xff) { |
184 | case 0: | 188 | case 0: |
185 | rc = 3; /* not operational */ | 189 | rc = SIGP_CC_NOT_OPERATIONAL; |
186 | break; | 190 | break; |
187 | case 1: | 191 | case 1: |
188 | case 2: | 192 | case 2: |
189 | rc = 0; /* order accepted */ | 193 | rc = SIGP_CC_ORDER_CODE_ACCEPTED; |
190 | break; | 194 | break; |
191 | default: | 195 | default: |
192 | rc = -EOPNOTSUPP; | 196 | rc = -EOPNOTSUPP; |
@@ -207,21 +211,23 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address, | |||
207 | address = address & 0x7fffe000u; | 211 | address = address & 0x7fffe000u; |
208 | if (copy_from_guest_absolute(vcpu, &tmp, address, 1) || | 212 | if (copy_from_guest_absolute(vcpu, &tmp, address, 1) || |
209 | copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)) { | 213 | copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)) { |
214 | *reg &= 0xffffffff00000000UL; | ||
210 | *reg |= SIGP_STATUS_INVALID_PARAMETER; | 215 | *reg |= SIGP_STATUS_INVALID_PARAMETER; |
211 | return 1; /* invalid parameter */ | 216 | return SIGP_CC_STATUS_STORED; |
212 | } | 217 | } |
213 | 218 | ||
214 | inti = kzalloc(sizeof(*inti), GFP_KERNEL); | 219 | inti = kzalloc(sizeof(*inti), GFP_KERNEL); |
215 | if (!inti) | 220 | if (!inti) |
216 | return 2; /* busy */ | 221 | return SIGP_CC_BUSY; |
217 | 222 | ||
218 | spin_lock(&fi->lock); | 223 | spin_lock(&fi->lock); |
219 | if (cpu_addr < KVM_MAX_VCPUS) | 224 | if (cpu_addr < KVM_MAX_VCPUS) |
220 | li = fi->local_int[cpu_addr]; | 225 | li = fi->local_int[cpu_addr]; |
221 | 226 | ||
222 | if (li == NULL) { | 227 | if (li == NULL) { |
223 | rc = 1; /* incorrect state */ | 228 | *reg &= 0xffffffff00000000UL; |
224 | *reg &= SIGP_STATUS_INCORRECT_STATE; | 229 | *reg |= SIGP_STATUS_INCORRECT_STATE; |
230 | rc = SIGP_CC_STATUS_STORED; | ||
225 | kfree(inti); | 231 | kfree(inti); |
226 | goto out_fi; | 232 | goto out_fi; |
227 | } | 233 | } |
@@ -229,8 +235,9 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address, | |||
229 | spin_lock_bh(&li->lock); | 235 | spin_lock_bh(&li->lock); |
230 | /* cpu must be in stopped state */ | 236 | /* cpu must be in stopped state */ |
231 | if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) { | 237 | if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) { |
232 | rc = 1; /* incorrect state */ | 238 | *reg &= 0xffffffff00000000UL; |
233 | *reg &= SIGP_STATUS_INCORRECT_STATE; | 239 | *reg |= SIGP_STATUS_INCORRECT_STATE; |
240 | rc = SIGP_CC_STATUS_STORED; | ||
234 | kfree(inti); | 241 | kfree(inti); |
235 | goto out_li; | 242 | goto out_li; |
236 | } | 243 | } |
@@ -242,7 +249,7 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address, | |||
242 | atomic_set(&li->active, 1); | 249 | atomic_set(&li->active, 1); |
243 | if (waitqueue_active(&li->wq)) | 250 | if (waitqueue_active(&li->wq)) |
244 | wake_up_interruptible(&li->wq); | 251 | wake_up_interruptible(&li->wq); |
245 | rc = 0; /* order accepted */ | 252 | rc = SIGP_CC_ORDER_CODE_ACCEPTED; |
246 | 253 | ||
247 | VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address); | 254 | VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address); |
248 | out_li: | 255 | out_li: |
@@ -259,21 +266,21 @@ static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr, | |||
259 | struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; | 266 | struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; |
260 | 267 | ||
261 | if (cpu_addr >= KVM_MAX_VCPUS) | 268 | if (cpu_addr >= KVM_MAX_VCPUS) |
262 | return 3; /* not operational */ | 269 | return SIGP_CC_NOT_OPERATIONAL; |
263 | 270 | ||
264 | spin_lock(&fi->lock); | 271 | spin_lock(&fi->lock); |
265 | if (fi->local_int[cpu_addr] == NULL) | 272 | if (fi->local_int[cpu_addr] == NULL) |
266 | rc = 3; /* not operational */ | 273 | rc = SIGP_CC_NOT_OPERATIONAL; |
267 | else { | 274 | else { |
268 | if (atomic_read(fi->local_int[cpu_addr]->cpuflags) | 275 | if (atomic_read(fi->local_int[cpu_addr]->cpuflags) |
269 | & CPUSTAT_RUNNING) { | 276 | & CPUSTAT_RUNNING) { |
270 | /* running */ | 277 | /* running */ |
271 | rc = 1; | 278 | rc = SIGP_CC_ORDER_CODE_ACCEPTED; |
272 | } else { | 279 | } else { |
273 | /* not running */ | 280 | /* not running */ |
274 | *reg &= 0xffffffff00000000UL; | 281 | *reg &= 0xffffffff00000000UL; |
275 | *reg |= SIGP_STATUS_NOT_RUNNING; | 282 | *reg |= SIGP_STATUS_NOT_RUNNING; |
276 | rc = 0; | 283 | rc = SIGP_CC_STATUS_STORED; |
277 | } | 284 | } |
278 | } | 285 | } |
279 | spin_unlock(&fi->lock); | 286 | spin_unlock(&fi->lock); |
@@ -286,23 +293,23 @@ static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr, | |||
286 | 293 | ||
287 | static int __sigp_restart(struct kvm_vcpu *vcpu, u16 cpu_addr) | 294 | static int __sigp_restart(struct kvm_vcpu *vcpu, u16 cpu_addr) |
288 | { | 295 | { |
289 | int rc = 0; | ||
290 | struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; | 296 | struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; |
291 | struct kvm_s390_local_interrupt *li; | 297 | struct kvm_s390_local_interrupt *li; |
298 | int rc = SIGP_CC_ORDER_CODE_ACCEPTED; | ||
292 | 299 | ||
293 | if (cpu_addr >= KVM_MAX_VCPUS) | 300 | if (cpu_addr >= KVM_MAX_VCPUS) |
294 | return 3; /* not operational */ | 301 | return SIGP_CC_NOT_OPERATIONAL; |
295 | 302 | ||
296 | spin_lock(&fi->lock); | 303 | spin_lock(&fi->lock); |
297 | li = fi->local_int[cpu_addr]; | 304 | li = fi->local_int[cpu_addr]; |
298 | if (li == NULL) { | 305 | if (li == NULL) { |
299 | rc = 3; /* not operational */ | 306 | rc = SIGP_CC_NOT_OPERATIONAL; |
300 | goto out; | 307 | goto out; |
301 | } | 308 | } |
302 | 309 | ||
303 | spin_lock_bh(&li->lock); | 310 | spin_lock_bh(&li->lock); |
304 | if (li->action_bits & ACTION_STOP_ON_STOP) | 311 | if (li->action_bits & ACTION_STOP_ON_STOP) |
305 | rc = 2; /* busy */ | 312 | rc = SIGP_CC_BUSY; |
306 | else | 313 | else |
307 | VCPU_EVENT(vcpu, 4, "sigp restart %x to handle userspace", | 314 | VCPU_EVENT(vcpu, 4, "sigp restart %x to handle userspace", |
308 | cpu_addr); | 315 | cpu_addr); |
@@ -377,7 +384,7 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu) | |||
377 | case SIGP_RESTART: | 384 | case SIGP_RESTART: |
378 | vcpu->stat.instruction_sigp_restart++; | 385 | vcpu->stat.instruction_sigp_restart++; |
379 | rc = __sigp_restart(vcpu, cpu_addr); | 386 | rc = __sigp_restart(vcpu, cpu_addr); |
380 | if (rc == 2) /* busy */ | 387 | if (rc == SIGP_CC_BUSY) |
381 | break; | 388 | break; |
382 | /* user space must know about restart */ | 389 | /* user space must know about restart */ |
383 | default: | 390 | default: |