path: root/arch/s390
author    Linus Torvalds <torvalds@linux-foundation.org>  2009-09-14 20:43:43 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2009-09-14 20:43:43 -0400
commit    69def9f05dfce3281bb06599057e6b8097385d39 (patch)
tree      7d826b22924268ddbfad101993b248996d40e2ec  /arch/s390
parent    353f6dd2dec992ddd34620a94b051b0f76227379 (diff)
parent    8e616fc8d343bd7f0f0a0c22407fdcb77f6d22b1 (diff)
Merge branch 'kvm-updates/2.6.32' of git://git.kernel.org/pub/scm/virt/kvm/kvm
* 'kvm-updates/2.6.32' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (202 commits)
  MAINTAINERS: update KVM entry
  KVM: correct error-handling code
  KVM: fix compile warnings on s390
  KVM: VMX: Check cpl before emulating debug register access
  KVM: fix misreporting of coalesced interrupts by kvm tracer
  KVM: x86: drop duplicate kvm_flush_remote_tlb calls
  KVM: VMX: call vmx_load_host_state() only if msr is cached
  KVM: VMX: Conditionally reload debug register 6
  KVM: Use thread debug register storage instead of kvm specific data
  KVM guest: do not batch pte updates from interrupt context
  KVM: Fix coalesced interrupt reporting in IOAPIC
  KVM guest: fix bogus wallclock physical address calculation
  KVM: VMX: Fix cr8 exiting control clobbering by EPT
  KVM: Optimize kvm_mmu_unprotect_page_virt() for tdp
  KVM: Document KVM_CAP_IRQCHIP
  KVM: Protect update_cr8_intercept() when running without an apic
  KVM: VMX: Fix EPT with WP bit change during paging
  KVM: Use kvm_{read,write}_guest_virt() to read and write segment descriptors
  KVM: x86 emulator: Add adc and sbb missing decoder flags
  KVM: Add missing #include
  ...
Diffstat (limited to 'arch/s390')
-rw-r--r--   arch/s390/include/asm/kvm.h         9
-rw-r--r--   arch/s390/include/asm/kvm_host.h   15
-rw-r--r--   arch/s390/include/asm/kvm_para.h    4
-rw-r--r--   arch/s390/kvm/Kconfig               9
-rw-r--r--   arch/s390/kvm/gaccess.h            23
-rw-r--r--   arch/s390/kvm/intercept.c          18
-rw-r--r--   arch/s390/kvm/interrupt.c           8
-rw-r--r--   arch/s390/kvm/kvm-s390.c           78
-rw-r--r--   arch/s390/kvm/kvm-s390.h           32
-rw-r--r--   arch/s390/kvm/sigp.c               60
10 files changed, 139 insertions, 117 deletions
diff --git a/arch/s390/include/asm/kvm.h b/arch/s390/include/asm/kvm.h
index 0b2f829f6d50..3dfcaeb5d7f4 100644
--- a/arch/s390/include/asm/kvm.h
+++ b/arch/s390/include/asm/kvm.h
@@ -15,15 +15,6 @@
  */
 #include <linux/types.h>
 
-/* for KVM_GET_IRQCHIP and KVM_SET_IRQCHIP */
-struct kvm_pic_state {
-        /* no PIC for s390 */
-};
-
-struct kvm_ioapic_state {
-        /* no IOAPIC for s390 */
-};
-
 /* for KVM_GET_REGS and KVM_SET_REGS */
 struct kvm_regs {
         /* general purpose regs for s390 */
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 698988f69403..27605b62b980 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -1,7 +1,7 @@
 /*
  * asm-s390/kvm_host.h - definition for kernel virtual machines on s390
  *
- * Copyright IBM Corp. 2008
+ * Copyright IBM Corp. 2008,2009
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License (version 2 only)
@@ -40,7 +40,11 @@ struct sca_block {
         struct sca_entry cpu[64];
 } __attribute__((packed));
 
-#define KVM_PAGES_PER_HPAGE 256
+#define KVM_NR_PAGE_SIZES 2
+#define KVM_HPAGE_SHIFT(x) (PAGE_SHIFT + ((x) - 1) * 8)
+#define KVM_HPAGE_SIZE(x) (1UL << KVM_HPAGE_SHIFT(x))
+#define KVM_HPAGE_MASK(x) (~(KVM_HPAGE_SIZE(x) - 1))
+#define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE)
 
 #define CPUSTAT_HOST 0x80000000
 #define CPUSTAT_WAIT 0x10000000
@@ -182,8 +186,9 @@ struct kvm_s390_interrupt_info {
 };
 
 /* for local_interrupt.action_flags */
-#define ACTION_STORE_ON_STOP 1
-#define ACTION_STOP_ON_STOP 2
+#define ACTION_STORE_ON_STOP (1<<0)
+#define ACTION_STOP_ON_STOP (1<<1)
+#define ACTION_RELOADVCPU_ON_STOP (1<<2)
 
 struct kvm_s390_local_interrupt {
         spinlock_t lock;
@@ -227,8 +232,6 @@ struct kvm_vm_stat {
 };
 
 struct kvm_arch{
-        unsigned long guest_origin;
-        unsigned long guest_memsize;
         struct sca_block *sca;
         debug_info_t *dbf;
         struct kvm_s390_float_interrupt float_int;
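For orientation: generic KVM now indexes host page sizes by level rather than through a single KVM_PAGES_PER_HPAGE constant, which is why the s390 header gains the level-based macros above. A small stand-alone check (assuming s390's 4 KiB base pages, i.e. PAGE_SHIFT == 12 — an assumption of this sketch, not something stated in the diff) shows that level 2 still works out to the same 256 base pages the old constant hard-coded:

/* Stand-alone sanity check of the new level-based huge page macros;
 * illustration only, not kernel code. */
#include <stdio.h>

#define PAGE_SHIFT 12                  /* assumed s390 base page: 4 KiB */
#define PAGE_SIZE (1UL << PAGE_SHIFT)

#define KVM_NR_PAGE_SIZES 2
#define KVM_HPAGE_SHIFT(x) (PAGE_SHIFT + ((x) - 1) * 8)
#define KVM_HPAGE_SIZE(x) (1UL << KVM_HPAGE_SHIFT(x))
#define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE)

int main(void)
{
        /* level 1 is the base page size, level 2 the 1 MiB segment */
        printf("level 2: %lu KiB, %lu base pages\n",
               KVM_HPAGE_SIZE(2) >> 10, KVM_PAGES_PER_HPAGE(2));
        /* prints "level 2: 1024 KiB, 256 base pages", i.e. the 256 pages
         * that the removed KVM_PAGES_PER_HPAGE constant hard-coded */
        return 0;
}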
diff --git a/arch/s390/include/asm/kvm_para.h b/arch/s390/include/asm/kvm_para.h
index 2c503796b619..6964db226f83 100644
--- a/arch/s390/include/asm/kvm_para.h
+++ b/arch/s390/include/asm/kvm_para.h
@@ -13,6 +13,8 @@
 #ifndef __S390_KVM_PARA_H
 #define __S390_KVM_PARA_H
 
+#ifdef __KERNEL__
+
 /*
  * Hypercalls for KVM on s390. The calling convention is similar to the
  * s390 ABI, so we use R2-R6 for parameters 1-5. In addition we use R1
@@ -147,4 +149,6 @@ static inline unsigned int kvm_arch_para_features(void)
         return 0;
 }
 
+#endif
+
 #endif /* __S390_KVM_PARA_H */
diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig
index 3e260b7e37b2..bf164fc21864 100644
--- a/arch/s390/kvm/Kconfig
+++ b/arch/s390/kvm/Kconfig
@@ -1,11 +1,7 @@
 #
 # KVM configuration
 #
-config HAVE_KVM
-        bool
-
-config HAVE_KVM_IRQCHIP
-        bool
+source "virt/kvm/Kconfig"
 
 menuconfig VIRTUALIZATION
         bool "Virtualization"
@@ -38,9 +34,6 @@ config KVM
 
           If unsure, say N.
 
-config KVM_TRACE
-        bool
-
 # OK, it's a little counter-intuitive to do this, but it puts it neatly under
 # the virtualization menu.
 source drivers/virtio/Kconfig
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
index ed60f3a74a85..03c716a0f01f 100644
--- a/arch/s390/kvm/gaccess.h
+++ b/arch/s390/kvm/gaccess.h
@@ -1,7 +1,7 @@
 /*
  * gaccess.h - access guest memory
  *
- * Copyright IBM Corp. 2008
+ * Copyright IBM Corp. 2008,2009
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License (version 2 only)
@@ -16,13 +16,14 @@
 #include <linux/compiler.h>
 #include <linux/kvm_host.h>
 #include <asm/uaccess.h>
+#include "kvm-s390.h"
 
 static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu,
                                                unsigned long guestaddr)
 {
         unsigned long prefix = vcpu->arch.sie_block->prefix;
-        unsigned long origin = vcpu->kvm->arch.guest_origin;
-        unsigned long memsize = vcpu->kvm->arch.guest_memsize;
+        unsigned long origin = vcpu->arch.sie_block->gmsor;
+        unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);
 
         if (guestaddr < 2 * PAGE_SIZE)
                 guestaddr += prefix;
@@ -158,8 +159,8 @@ static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest,
                           const void *from, unsigned long n)
 {
         unsigned long prefix = vcpu->arch.sie_block->prefix;
-        unsigned long origin = vcpu->kvm->arch.guest_origin;
-        unsigned long memsize = vcpu->kvm->arch.guest_memsize;
+        unsigned long origin = vcpu->arch.sie_block->gmsor;
+        unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);
 
         if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE))
                 goto slowpath;
@@ -209,8 +210,8 @@ static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
                            unsigned long guestsrc, unsigned long n)
 {
         unsigned long prefix = vcpu->arch.sie_block->prefix;
-        unsigned long origin = vcpu->kvm->arch.guest_origin;
-        unsigned long memsize = vcpu->kvm->arch.guest_memsize;
+        unsigned long origin = vcpu->arch.sie_block->gmsor;
+        unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);
 
         if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE))
                 goto slowpath;
@@ -244,8 +245,8 @@ static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu,
                                          unsigned long guestdest,
                                          const void *from, unsigned long n)
 {
-        unsigned long origin = vcpu->kvm->arch.guest_origin;
-        unsigned long memsize = vcpu->kvm->arch.guest_memsize;
+        unsigned long origin = vcpu->arch.sie_block->gmsor;
+        unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);
 
         if (guestdest + n > memsize)
                 return -EFAULT;
@@ -262,8 +263,8 @@ static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to,
                                            unsigned long guestsrc,
                                            unsigned long n)
 {
-        unsigned long origin = vcpu->kvm->arch.guest_origin;
-        unsigned long memsize = vcpu->kvm->arch.guest_memsize;
+        unsigned long origin = vcpu->arch.sie_block->gmsor;
+        unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);
 
         if (guestsrc + n > memsize)
                 return -EFAULT;
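The gaccess.h helpers now derive the guest origin and memory size from the vcpu's SIE block (gmsor plus the new kvm_s390_vcpu_get_memsize()) rather than from the removed per-VM guest_origin/guest_memsize fields. A simplified user-space model of the translation rule they apply; guest_to_host() is a hypothetical stand-in for illustration, not kernel code, and the real helper additionally maps the prefix area back to absolute zero:

#include <stdint.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL

/* Model: low-core (first two guest pages) goes through the prefix register,
 * the address is bounds-checked against the registered memory size, and the
 * host address is gmsor (the guest origin) plus the guest address. */
static void *guest_to_host(uint64_t guestaddr, uint64_t prefix,
                           uint64_t gmsor, uint64_t memsize)
{
        if (guestaddr < 2 * PAGE_SIZE)
                guestaddr += prefix;            /* low-core is prefixed */

        if (guestaddr > memsize)
                return NULL;                    /* outside guest memory */

        return (void *)(uintptr_t)(gmsor + guestaddr);
}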
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 98997ccba501..ba9d8a7bc1ac 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -1,7 +1,7 @@
 /*
  * intercept.c - in-kernel handling for sie intercepts
  *
- * Copyright IBM Corp. 2008
+ * Copyright IBM Corp. 2008,2009
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License (version 2 only)
@@ -128,7 +128,7 @@ static int handle_noop(struct kvm_vcpu *vcpu)
 
 static int handle_stop(struct kvm_vcpu *vcpu)
 {
-        int rc;
+        int rc = 0;
 
         vcpu->stat.exit_stop_request++;
         atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
@@ -141,12 +141,18 @@ static int handle_stop(struct kvm_vcpu *vcpu)
                 rc = -ENOTSUPP;
         }
 
+        if (vcpu->arch.local_int.action_bits & ACTION_RELOADVCPU_ON_STOP) {
+                vcpu->arch.local_int.action_bits &= ~ACTION_RELOADVCPU_ON_STOP;
+                rc = SIE_INTERCEPT_RERUNVCPU;
+                vcpu->run->exit_reason = KVM_EXIT_INTR;
+        }
+
         if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) {
                 vcpu->arch.local_int.action_bits &= ~ACTION_STOP_ON_STOP;
                 VCPU_EVENT(vcpu, 3, "%s", "cpu stopped");
                 rc = -ENOTSUPP;
-        } else
-                rc = 0;
+        }
+
         spin_unlock_bh(&vcpu->arch.local_int.lock);
         return rc;
 }
@@ -158,9 +164,9 @@ static int handle_validity(struct kvm_vcpu *vcpu)
 
         vcpu->stat.exit_validity++;
         if ((viwhy == 0x37) && (vcpu->arch.sie_block->prefix
-                <= vcpu->kvm->arch.guest_memsize - 2*PAGE_SIZE)){
+                <= kvm_s390_vcpu_get_memsize(vcpu) - 2*PAGE_SIZE)) {
                 rc = fault_in_pages_writeable((char __user *)
-                        vcpu->kvm->arch.guest_origin +
+                        vcpu->arch.sie_block->gmsor +
                         vcpu->arch.sie_block->prefix,
                         2*PAGE_SIZE);
                 if (rc)
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 4d613415c435..2c2f98353415 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -283,7 +283,7 @@ static int __try_deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
         return 1;
 }
 
-int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
+static int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
 {
         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
         struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
@@ -320,12 +320,6 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
         return rc;
 }
 
-int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
-{
-        /* do real check here */
-        return 1;
-}
-
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 {
         return 0;
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 90d9d1ba258b..07ced89740d7 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1,7 +1,7 @@
 /*
  * s390host.c -- hosting zSeries kernel virtual machines
  *
- * Copyright IBM Corp. 2008
+ * Copyright IBM Corp. 2008,2009
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License (version 2 only)
@@ -10,6 +10,7 @@
  * Author(s): Carsten Otte <cotte@de.ibm.com>
  *               Christian Borntraeger <borntraeger@de.ibm.com>
  *               Heiko Carstens <heiko.carstens@de.ibm.com>
+ *               Christian Ehrhardt <ehrhardt@de.ibm.com>
  */
 
 #include <linux/compiler.h>
@@ -210,13 +211,17 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 static void kvm_free_vcpus(struct kvm *kvm)
 {
         unsigned int i;
+        struct kvm_vcpu *vcpu;
 
-        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
-                if (kvm->vcpus[i]) {
-                        kvm_arch_vcpu_destroy(kvm->vcpus[i]);
-                        kvm->vcpus[i] = NULL;
-                }
-        }
+        kvm_for_each_vcpu(i, vcpu, kvm)
+                kvm_arch_vcpu_destroy(vcpu);
+
+        mutex_lock(&kvm->lock);
+        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
+                kvm->vcpus[i] = NULL;
+
+        atomic_set(&kvm->online_vcpus, 0);
+        mutex_unlock(&kvm->lock);
 }
 
 void kvm_arch_sync_events(struct kvm *kvm)
@@ -278,16 +283,10 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
         vcpu->arch.sie_block->gbea = 1;
 }
 
-/* The current code can have up to 256 pages for virtio */
-#define VIRTIODESCSPACE (256ul * 4096ul)
-
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
         atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
-        vcpu->arch.sie_block->gmslm = vcpu->kvm->arch.guest_memsize +
-                                      vcpu->kvm->arch.guest_origin +
-                                      VIRTIODESCSPACE - 1ul;
-        vcpu->arch.sie_block->gmsor = vcpu->kvm->arch.guest_origin;
+        set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests);
         vcpu->arch.sie_block->ecb = 2;
         vcpu->arch.sie_block->eca = 0xC1002001U;
         vcpu->arch.sie_block->fac = (int) (long) facilities;
@@ -319,8 +318,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
         BUG_ON(!kvm->arch.sca);
         if (!kvm->arch.sca->cpu[id].sda)
                 kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
-        else
-                BUG_ON(!kvm->vcpus[id]); /* vcpu does already exist */
         vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
         vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
 
@@ -490,9 +487,15 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
         vcpu_load(vcpu);
 
+rerun_vcpu:
+        if (vcpu->requests)
+                if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
+                        kvm_s390_vcpu_set_mem(vcpu);
+
         /* verify, that memory has been registered */
-        if (!vcpu->kvm->arch.guest_memsize) {
+        if (!vcpu->arch.sie_block->gmslm) {
                 vcpu_put(vcpu);
+                VCPU_EVENT(vcpu, 3, "%s", "no memory registered to run vcpu");
                 return -EINVAL;
         }
 
@@ -509,6 +512,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                 vcpu->arch.sie_block->gpsw.addr = kvm_run->s390_sieic.addr;
                 break;
         case KVM_EXIT_UNKNOWN:
+        case KVM_EXIT_INTR:
         case KVM_EXIT_S390_RESET:
                 break;
         default:
@@ -522,8 +526,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                 rc = kvm_handle_sie_intercept(vcpu);
         } while (!signal_pending(current) && !rc);
 
-        if (signal_pending(current) && !rc)
+        if (rc == SIE_INTERCEPT_RERUNVCPU)
+                goto rerun_vcpu;
+
+        if (signal_pending(current) && !rc) {
+                kvm_run->exit_reason = KVM_EXIT_INTR;
                 rc = -EINTR;
+        }
 
         if (rc == -ENOTSUPP) {
                 /* intercept cannot be handled in-kernel, prepare kvm-run */
@@ -676,6 +685,7 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
                                 int user_alloc)
 {
         int i;
+        struct kvm_vcpu *vcpu;
 
         /* A few sanity checks. We can have exactly one memory slot which has
            to start at guest virtual zero and which has to be located at a
@@ -684,7 +694,7 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
            vmas. It is okay to mmap() and munmap() stuff in this slot after
            doing this call at any time */
 
-        if (mem->slot || kvm->arch.guest_memsize)
+        if (mem->slot)
                 return -EINVAL;
 
         if (mem->guest_phys_addr)
@@ -699,36 +709,14 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
         if (!user_alloc)
                 return -EINVAL;
 
-        /* lock all vcpus */
-        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
-                if (!kvm->vcpus[i])
+        /* request update of sie control block for all available vcpus */
+        kvm_for_each_vcpu(i, vcpu, kvm) {
+                if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
                         continue;
-                if (!mutex_trylock(&kvm->vcpus[i]->mutex))
-                        goto fail_out;
-        }
-
-        kvm->arch.guest_origin = mem->userspace_addr;
-        kvm->arch.guest_memsize = mem->memory_size;
-
-        /* update sie control blocks, and unlock all vcpus */
-        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
-                if (kvm->vcpus[i]) {
-                        kvm->vcpus[i]->arch.sie_block->gmsor =
-                                kvm->arch.guest_origin;
-                        kvm->vcpus[i]->arch.sie_block->gmslm =
-                                kvm->arch.guest_memsize +
-                                kvm->arch.guest_origin +
-                                VIRTIODESCSPACE - 1ul;
-                        mutex_unlock(&kvm->vcpus[i]->mutex);
-                }
+                kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP);
         }
 
         return 0;
-
-fail_out:
-        for (; i >= 0; i--)
-                mutex_unlock(&kvm->vcpus[i]->mutex);
-        return -EINVAL;
 }
 
 void kvm_arch_flush_shadow(struct kvm *kvm)
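With the kick infrastructure above, kvm_arch_set_memory_region() no longer has to trylock every vcpu mutex: it sets KVM_REQ_MMU_RELOAD on each vcpu and injects a sigp stop carrying ACTION_RELOADVCPU_ON_STOP, and each vcpu applies the new gmsor/gmslm via kvm_s390_vcpu_set_mem() the next time it passes the rerun_vcpu label in kvm_arch_vcpu_ioctl_run(). A minimal user-space sketch of registering the single slot the s390 checks accept, using the standard KVM ioctl (error handling trimmed; the size is illustrative and not taken from this patch):

#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

int register_guest_memory(int vm_fd, size_t size)
{
        void *mem = mmap(NULL, size, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        struct kvm_userspace_memory_region region = {
                .slot = 0,                       /* only slot 0 is allowed */
                .guest_phys_addr = 0,            /* must start at guest absolute 0 */
                .memory_size = size,             /* must be page aligned */
                .userspace_addr = (unsigned long)mem,
        };

        /* After this patch the slot can be set up even while vcpus exist;
         * they pick up the new SIE origin/limit on their next entry. */
        return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}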
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 748fee872323..ec5eee7c25d8 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -1,7 +1,7 @@
 /*
  * kvm_s390.h - definition for kvm on s390
  *
- * Copyright IBM Corp. 2008
+ * Copyright IBM Corp. 2008,2009
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License (version 2 only)
@@ -9,6 +9,7 @@
  *
  * Author(s): Carsten Otte <cotte@de.ibm.com>
  *               Christian Borntraeger <borntraeger@de.ibm.com>
+ *               Christian Ehrhardt <ehrhardt@de.ibm.com>
  */
 
 #ifndef ARCH_S390_KVM_S390_H
@@ -18,8 +19,13 @@
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
 
+/* The current code can have up to 256 pages for virtio */
+#define VIRTIODESCSPACE (256ul * 4096ul)
+
 typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu);
 
+/* negativ values are error codes, positive values for internal conditions */
+#define SIE_INTERCEPT_RERUNVCPU (1<<0)
 int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu);
 
 #define VM_EVENT(d_kvm, d_loglevel, d_string, d_args...)\
@@ -50,6 +56,30 @@ int kvm_s390_inject_vm(struct kvm *kvm,
 int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
                          struct kvm_s390_interrupt *s390int);
 int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code);
+int kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action);
+
+static inline int kvm_s390_vcpu_get_memsize(struct kvm_vcpu *vcpu)
+{
+        return vcpu->arch.sie_block->gmslm
+                - vcpu->arch.sie_block->gmsor
+                - VIRTIODESCSPACE + 1ul;
+}
+
+static inline void kvm_s390_vcpu_set_mem(struct kvm_vcpu *vcpu)
+{
+        struct kvm_memory_slot *mem;
+
+        down_read(&vcpu->kvm->slots_lock);
+        mem = &vcpu->kvm->memslots[0];
+
+        vcpu->arch.sie_block->gmsor = mem->userspace_addr;
+        vcpu->arch.sie_block->gmslm =
+                mem->userspace_addr +
+                (mem->npages << PAGE_SHIFT) +
+                VIRTIODESCSPACE - 1ul;
+
+        up_read(&vcpu->kvm->slots_lock);
+}
 
 /* implemented in priv.c */
 int kvm_s390_handle_b2(struct kvm_vcpu *vcpu);
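The two new inline helpers are inverses of each other: kvm_s390_vcpu_set_mem() stores gmsor = userspace_addr and gmslm = userspace_addr + (npages << PAGE_SHIFT) + VIRTIODESCSPACE - 1, so kvm_s390_vcpu_get_memsize() recovers exactly npages << PAGE_SHIFT. A stand-alone arithmetic check with made-up example values, not kernel code:

#include <assert.h>

#define PAGE_SHIFT 12
#define VIRTIODESCSPACE (256ul * 4096ul)

int main(void)
{
        unsigned long userspace_addr = 0x80000000ul;   /* arbitrary example */
        unsigned long npages = 0x20000ul;              /* 512 MiB of guest memory */

        unsigned long gmsor = userspace_addr;
        unsigned long gmslm = userspace_addr + (npages << PAGE_SHIFT)
                              + VIRTIODESCSPACE - 1ul;

        /* get_memsize(): gmslm - gmsor - VIRTIODESCSPACE + 1 == slot size */
        assert(gmslm - gmsor - VIRTIODESCSPACE + 1ul == npages << PAGE_SHIFT);
        return 0;
}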
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index 0ef81d6776e9..40c8c6748cfe 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -1,7 +1,7 @@
 /*
  * sigp.c - handlinge interprocessor communication
  *
- * Copyright IBM Corp. 2008
+ * Copyright IBM Corp. 2008,2009
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License (version 2 only)
@@ -9,6 +9,7 @@
  *
  * Author(s): Carsten Otte <cotte@de.ibm.com>
  *               Christian Borntraeger <borntraeger@de.ibm.com>
+ *               Christian Ehrhardt <ehrhardt@de.ibm.com>
  */
 
 #include <linux/kvm.h>
@@ -107,46 +108,57 @@ unlock:
         return rc;
 }
 
-static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int store)
+static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
 {
-        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
-        struct kvm_s390_local_interrupt *li;
         struct kvm_s390_interrupt_info *inti;
-        int rc;
-
-        if (cpu_addr >= KVM_MAX_VCPUS)
-                return 3; /* not operational */
-
+
         inti = kzalloc(sizeof(*inti), GFP_KERNEL);
         if (!inti)
                 return -ENOMEM;
-
         inti->type = KVM_S390_SIGP_STOP;
 
-        spin_lock(&fi->lock);
-        li = fi->local_int[cpu_addr];
-        if (li == NULL) {
-                rc = 3; /* not operational */
-                kfree(inti);
-                goto unlock;
-        }
         spin_lock_bh(&li->lock);
         list_add_tail(&inti->list, &li->list);
         atomic_set(&li->active, 1);
         atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
-        if (store)
-                li->action_bits |= ACTION_STORE_ON_STOP;
-        li->action_bits |= ACTION_STOP_ON_STOP;
+        li->action_bits |= action;
         if (waitqueue_active(&li->wq))
                 wake_up_interruptible(&li->wq);
         spin_unlock_bh(&li->lock);
-        rc = 0; /* order accepted */
+
+        return 0; /* order accepted */
+}
+
+static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
+{
+        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
+        struct kvm_s390_local_interrupt *li;
+        int rc;
+
+        if (cpu_addr >= KVM_MAX_VCPUS)
+                return 3; /* not operational */
+
+        spin_lock(&fi->lock);
+        li = fi->local_int[cpu_addr];
+        if (li == NULL) {
+                rc = 3; /* not operational */
+                goto unlock;
+        }
+
+        rc = __inject_sigp_stop(li, action);
+
 unlock:
         spin_unlock(&fi->lock);
         VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);
         return rc;
 }
 
+int kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action)
+{
+        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+        return __inject_sigp_stop(li, action);
+}
+
 static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
 {
         int rc;
@@ -177,9 +189,9 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
         /* make sure that the new value is valid memory */
         address = address & 0x7fffe000u;
         if ((copy_from_guest(vcpu, &tmp,
-                (u64) (address + vcpu->kvm->arch.guest_origin) , 1)) ||
+                (u64) (address + vcpu->arch.sie_block->gmsor) , 1)) ||
            (copy_from_guest(vcpu, &tmp, (u64) (address +
-                vcpu->kvm->arch.guest_origin + PAGE_SIZE), 1))) {
+                vcpu->arch.sie_block->gmsor + PAGE_SIZE), 1))) {
                 *reg |= SIGP_STAT_INVALID_PARAMETER;
                 return 1; /* invalid parameter */
         }
@@ -262,11 +274,11 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
                 break;
         case SIGP_STOP:
                 vcpu->stat.instruction_sigp_stop++;
-                rc = __sigp_stop(vcpu, cpu_addr, 0);
+                rc = __sigp_stop(vcpu, cpu_addr, ACTION_STOP_ON_STOP);
                 break;
         case SIGP_STOP_STORE_STATUS:
                 vcpu->stat.instruction_sigp_stop++;
-                rc = __sigp_stop(vcpu, cpu_addr, 1);
+                rc = __sigp_stop(vcpu, cpu_addr, ACTION_STORE_ON_STOP);
                 break;
         case SIGP_SET_ARCH:
                 vcpu->stat.instruction_sigp_arch++;