diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2013-07-03 16:21:40 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-07-03 16:21:40 -0400 |
commit | fe489bf4505ae26d3c6d6a1f1d3064c2a9c5cd85 (patch) | |
tree | 46596fd7edf7c4da1dafdb2c62011841e71cf32d /arch/s390/kvm/sigp.c | |
parent | 3e34131a65127e73fbae68c82748f32c8af7e4a4 (diff) | |
parent | a3ff5fbc94a829680d4aa005cd17add1c1a1fb5b (diff) |
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM fixes from Paolo Bonzini:
"On the x86 side, there are some optimizations and documentation
updates. The big ARM/KVM change for 3.11, support for AArch64, will
come through Catalin Marinas's tree. s390 and PPC have misc cleanups
and bugfixes"
* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (87 commits)
KVM: PPC: Ignore PIR writes
KVM: PPC: Book3S PR: Invalidate SLB entries properly
KVM: PPC: Book3S PR: Allow guest to use 1TB segments
KVM: PPC: Book3S PR: Don't keep scanning HPTEG after we find a match
KVM: PPC: Book3S PR: Fix invalidation of SLB entry 0 on guest entry
KVM: PPC: Book3S PR: Fix proto-VSID calculations
KVM: PPC: Guard doorbell exception with CONFIG_PPC_DOORBELL
KVM: Fix RTC interrupt coalescing tracking
kvm: Add a tracepoint write_tsc_offset
KVM: MMU: Inform users of mmio generation wraparound
KVM: MMU: document fast invalidate all mmio sptes
KVM: MMU: document fast invalidate all pages
KVM: MMU: document fast page fault
KVM: MMU: document mmio page fault
KVM: MMU: document write_flooding_count
KVM: MMU: document clear_spte_count
KVM: MMU: drop kvm_mmu_zap_mmio_sptes
KVM: MMU: init kvm generation close to mmio wrap-around value
KVM: MMU: add tracepoint for check_mmio_spte
KVM: MMU: fast invalidate all mmio sptes
...
Diffstat (limited to 'arch/s390/kvm/sigp.c')
-rw-r--r-- | arch/s390/kvm/sigp.c | 19 |
1 file changed, 9 insertions, 10 deletions
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c index 1c48ab2845e0..bec398c57acf 100644 --- a/arch/s390/kvm/sigp.c +++ b/arch/s390/kvm/sigp.c | |||
@@ -79,8 +79,8 @@ static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr) | |||
79 | list_add_tail(&inti->list, &li->list); | 79 | list_add_tail(&inti->list, &li->list); |
80 | atomic_set(&li->active, 1); | 80 | atomic_set(&li->active, 1); |
81 | atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); | 81 | atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); |
82 | if (waitqueue_active(&li->wq)) | 82 | if (waitqueue_active(li->wq)) |
83 | wake_up_interruptible(&li->wq); | 83 | wake_up_interruptible(li->wq); |
84 | spin_unlock_bh(&li->lock); | 84 | spin_unlock_bh(&li->lock); |
85 | rc = SIGP_CC_ORDER_CODE_ACCEPTED; | 85 | rc = SIGP_CC_ORDER_CODE_ACCEPTED; |
86 | VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr); | 86 | VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr); |
@@ -117,8 +117,8 @@ static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr) | |||
117 | list_add_tail(&inti->list, &li->list); | 117 | list_add_tail(&inti->list, &li->list); |
118 | atomic_set(&li->active, 1); | 118 | atomic_set(&li->active, 1); |
119 | atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); | 119 | atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); |
120 | if (waitqueue_active(&li->wq)) | 120 | if (waitqueue_active(li->wq)) |
121 | wake_up_interruptible(&li->wq); | 121 | wake_up_interruptible(li->wq); |
122 | spin_unlock_bh(&li->lock); | 122 | spin_unlock_bh(&li->lock); |
123 | rc = SIGP_CC_ORDER_CODE_ACCEPTED; | 123 | rc = SIGP_CC_ORDER_CODE_ACCEPTED; |
124 | VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr); | 124 | VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr); |
@@ -145,8 +145,8 @@ static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action) | |||
145 | atomic_set(&li->active, 1); | 145 | atomic_set(&li->active, 1); |
146 | atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags); | 146 | atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags); |
147 | li->action_bits |= action; | 147 | li->action_bits |= action; |
148 | if (waitqueue_active(&li->wq)) | 148 | if (waitqueue_active(li->wq)) |
149 | wake_up_interruptible(&li->wq); | 149 | wake_up_interruptible(li->wq); |
150 | out: | 150 | out: |
151 | spin_unlock_bh(&li->lock); | 151 | spin_unlock_bh(&li->lock); |
152 | 152 | ||
@@ -250,8 +250,8 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address, | |||
250 | 250 | ||
251 | list_add_tail(&inti->list, &li->list); | 251 | list_add_tail(&inti->list, &li->list); |
252 | atomic_set(&li->active, 1); | 252 | atomic_set(&li->active, 1); |
253 | if (waitqueue_active(&li->wq)) | 253 | if (waitqueue_active(li->wq)) |
254 | wake_up_interruptible(&li->wq); | 254 | wake_up_interruptible(li->wq); |
255 | rc = SIGP_CC_ORDER_CODE_ACCEPTED; | 255 | rc = SIGP_CC_ORDER_CODE_ACCEPTED; |
256 | 256 | ||
257 | VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address); | 257 | VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address); |
@@ -333,8 +333,7 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu) | |||
333 | 333 | ||
334 | /* sigp in userspace can exit */ | 334 | /* sigp in userspace can exit */ |
335 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) | 335 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) |
336 | return kvm_s390_inject_program_int(vcpu, | 336 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); |
337 | PGM_PRIVILEGED_OPERATION); | ||
338 | 337 | ||
339 | order_code = kvm_s390_get_base_disp_rs(vcpu); | 338 | order_code = kvm_s390_get_base_disp_rs(vcpu); |
340 | 339 | ||