author    Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>    2009-05-25 07:40:51 -0400
committer Avi Kivity <avi@redhat.com>    2009-09-10 01:32:42 -0400
commit    628eb9b8a8f3ef31d8316112a4596b1a21b38159 (patch)
tree      db34c09360a93e0bb888195745f45017abb07f14 /arch/s390/kvm
parent    b1d16c495d9e6fe48e7df2e1d18cafc6555a116a (diff)
KVM: s390: streamline memslot handling
This patch relocates the variables kvm-s390 uses to track guest memory address and size. As discussed, dropping the variables at struct kvm_arch level allows the common vcpu->requests based mechanism to be used to reload guest memory if it changes, e.g. via set_memory_region.

The kick mechanism introduced in this series is used to ensure that running vcpus leave guest state to catch the update.

Signed-off-by: Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
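The request flow described above can be read from the hunks below; as a rough, simplified sketch (condensed from this patch's own hunks, not a compilable excerpt), the two sides look like this:

    /* kvm_arch_set_memory_region() side: mark every existing vcpu and kick it
     * out of guest context so the new memslot layout is picked up */
    for (i = 0; i < KVM_MAX_VCPUS; ++i) {
            if (!kvm->vcpus[i])
                    continue;
            if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &kvm->vcpus[i]->requests))
                    continue;       /* a reload is already pending */
            kvm_s390_inject_sigp_stop(kvm->vcpus[i], ACTION_RELOADVCPU_ON_STOP);
    }

    /* kvm_arch_vcpu_ioctl_run() side: consume the request before re-entering
     * SIE and refresh gmsor/gmslm in the sie control block from memslot 0 */
    if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
            kvm_s390_vcpu_set_mem(vcpu);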
Diffstat (limited to 'arch/s390/kvm')
-rw-r--r--    arch/s390/kvm/gaccess.h      23
-rw-r--r--    arch/s390/kvm/intercept.c     6
-rw-r--r--    arch/s390/kvm/kvm-s390.c     50
-rw-r--r--    arch/s390/kvm/kvm-s390.h     29
-rw-r--r--    arch/s390/kvm/sigp.c          4
5 files changed, 61 insertions, 51 deletions
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
index ed60f3a74a85..03c716a0f01f 100644
--- a/arch/s390/kvm/gaccess.h
+++ b/arch/s390/kvm/gaccess.h
@@ -1,7 +1,7 @@
 /*
  * gaccess.h - access guest memory
  *
- * Copyright IBM Corp. 2008
+ * Copyright IBM Corp. 2008,2009
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License (version 2 only)
@@ -16,13 +16,14 @@
 #include <linux/compiler.h>
 #include <linux/kvm_host.h>
 #include <asm/uaccess.h>
+#include "kvm-s390.h"
 
 static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu,
                                                unsigned long guestaddr)
 {
         unsigned long prefix = vcpu->arch.sie_block->prefix;
-        unsigned long origin = vcpu->kvm->arch.guest_origin;
-        unsigned long memsize = vcpu->kvm->arch.guest_memsize;
+        unsigned long origin = vcpu->arch.sie_block->gmsor;
+        unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);
 
         if (guestaddr < 2 * PAGE_SIZE)
                 guestaddr += prefix;
@@ -158,8 +159,8 @@ static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest,
                          const void *from, unsigned long n)
 {
         unsigned long prefix = vcpu->arch.sie_block->prefix;
-        unsigned long origin = vcpu->kvm->arch.guest_origin;
-        unsigned long memsize = vcpu->kvm->arch.guest_memsize;
+        unsigned long origin = vcpu->arch.sie_block->gmsor;
+        unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);
 
         if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE))
                 goto slowpath;
@@ -209,8 +210,8 @@ static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
                            unsigned long guestsrc, unsigned long n)
 {
         unsigned long prefix = vcpu->arch.sie_block->prefix;
-        unsigned long origin = vcpu->kvm->arch.guest_origin;
-        unsigned long memsize = vcpu->kvm->arch.guest_memsize;
+        unsigned long origin = vcpu->arch.sie_block->gmsor;
+        unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);
 
         if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE))
                 goto slowpath;
@@ -244,8 +245,8 @@ static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu,
                                          unsigned long guestdest,
                                          const void *from, unsigned long n)
 {
-        unsigned long origin = vcpu->kvm->arch.guest_origin;
-        unsigned long memsize = vcpu->kvm->arch.guest_memsize;
+        unsigned long origin = vcpu->arch.sie_block->gmsor;
+        unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);
 
         if (guestdest + n > memsize)
                 return -EFAULT;
@@ -262,8 +263,8 @@ static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to,
                                            unsigned long guestsrc,
                                            unsigned long n)
 {
-        unsigned long origin = vcpu->kvm->arch.guest_origin;
-        unsigned long memsize = vcpu->kvm->arch.guest_memsize;
+        unsigned long origin = vcpu->arch.sie_block->gmsor;
+        unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);
 
         if (guestsrc + n > memsize)
                 return -EFAULT;
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 0732ab4305f4..ba9d8a7bc1ac 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -1,7 +1,7 @@
 /*
  * intercept.c - in-kernel handling for sie intercepts
  *
- * Copyright IBM Corp. 2008
+ * Copyright IBM Corp. 2008,2009
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License (version 2 only)
@@ -164,9 +164,9 @@ static int handle_validity(struct kvm_vcpu *vcpu)
 
         vcpu->stat.exit_validity++;
         if ((viwhy == 0x37) && (vcpu->arch.sie_block->prefix
-                <= vcpu->kvm->arch.guest_memsize - 2*PAGE_SIZE)){
+                <= kvm_s390_vcpu_get_memsize(vcpu) - 2*PAGE_SIZE)) {
                 rc = fault_in_pages_writeable((char __user *)
-                        vcpu->kvm->arch.guest_origin +
+                        vcpu->arch.sie_block->gmsor +
                         vcpu->arch.sie_block->prefix,
                         2*PAGE_SIZE);
                 if (rc)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 5c1c30259002..098bfa6fbdf6 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1,7 +1,7 @@
 /*
  * s390host.c -- hosting zSeries kernel virtual machines
  *
- * Copyright IBM Corp. 2008
+ * Copyright IBM Corp. 2008,2009
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License (version 2 only)
@@ -10,6 +10,7 @@
  *    Author(s): Carsten Otte <cotte@de.ibm.com>
  *               Christian Borntraeger <borntraeger@de.ibm.com>
  *               Heiko Carstens <heiko.carstens@de.ibm.com>
+ *               Christian Ehrhardt <ehrhardt@de.ibm.com>
  */
 
 #include <linux/compiler.h>
@@ -278,16 +279,10 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
         vcpu->arch.sie_block->gbea = 1;
 }
 
-/* The current code can have up to 256 pages for virtio */
-#define VIRTIODESCSPACE (256ul * 4096ul)
-
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
         atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
-        vcpu->arch.sie_block->gmslm = vcpu->kvm->arch.guest_memsize +
-                                      vcpu->kvm->arch.guest_origin +
-                                      VIRTIODESCSPACE - 1ul;
-        vcpu->arch.sie_block->gmsor = vcpu->kvm->arch.guest_origin;
+        set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests);
         vcpu->arch.sie_block->ecb = 2;
         vcpu->arch.sie_block->eca = 0xC1002001U;
         vcpu->arch.sie_block->fac = (int) (long) facilities;
@@ -491,9 +486,14 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
         vcpu_load(vcpu);
 
 rerun_vcpu:
+        if (vcpu->requests)
+                if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
+                        kvm_s390_vcpu_set_mem(vcpu);
+
         /* verify, that memory has been registered */
-        if (!vcpu->kvm->arch.guest_memsize) {
+        if (!vcpu->arch.sie_block->gmslm) {
                 vcpu_put(vcpu);
+                VCPU_EVENT(vcpu, 3, "%s", "no memory registered to run vcpu");
                 return -EINVAL;
         }
 
@@ -691,7 +691,7 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
            vmas. It is okay to mmap() and munmap() stuff in this slot after
            doing this call at any time */
 
-        if (mem->slot || kvm->arch.guest_memsize)
+        if (mem->slot)
                 return -EINVAL;
 
         if (mem->guest_phys_addr)
@@ -706,36 +706,18 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
         if (!user_alloc)
                 return -EINVAL;
 
-        /* lock all vcpus */
-        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
-                if (!kvm->vcpus[i])
-                        continue;
-                if (!mutex_trylock(&kvm->vcpus[i]->mutex))
-                        goto fail_out;
-        }
-
-        kvm->arch.guest_origin = mem->userspace_addr;
-        kvm->arch.guest_memsize = mem->memory_size;
-
-        /* update sie control blocks, and unlock all vcpus */
+        /* request update of sie control block for all available vcpus */
         for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                 if (kvm->vcpus[i]) {
-                        kvm->vcpus[i]->arch.sie_block->gmsor =
-                                kvm->arch.guest_origin;
-                        kvm->vcpus[i]->arch.sie_block->gmslm =
-                                kvm->arch.guest_memsize +
-                                kvm->arch.guest_origin +
-                                VIRTIODESCSPACE - 1ul;
-                        mutex_unlock(&kvm->vcpus[i]->mutex);
+                        if (test_and_set_bit(KVM_REQ_MMU_RELOAD,
+                                             &kvm->vcpus[i]->requests))
+                                continue;
+                        kvm_s390_inject_sigp_stop(kvm->vcpus[i],
+                                                  ACTION_RELOADVCPU_ON_STOP);
                 }
         }
 
         return 0;
-
-fail_out:
-        for (; i >= 0; i--)
-                mutex_unlock(&kvm->vcpus[i]->mutex);
-        return -EINVAL;
 }
 
 void kvm_arch_flush_shadow(struct kvm *kvm)
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 2072cd4a013e..ec5eee7c25d8 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -1,7 +1,7 @@
 /*
  * kvm_s390.h - definition for kvm on s390
  *
- * Copyright IBM Corp. 2008
+ * Copyright IBM Corp. 2008,2009
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License (version 2 only)
@@ -9,6 +9,7 @@
  *
  *    Author(s): Carsten Otte <cotte@de.ibm.com>
  *               Christian Borntraeger <borntraeger@de.ibm.com>
+ *               Christian Ehrhardt <ehrhardt@de.ibm.com>
  */
 
 #ifndef ARCH_S390_KVM_S390_H
@@ -18,6 +19,9 @@
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
 
+/* The current code can have up to 256 pages for virtio */
+#define VIRTIODESCSPACE (256ul * 4096ul)
+
 typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu);
 
 /* negativ values are error codes, positive values for internal conditions */
@@ -54,6 +58,29 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
 int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code);
 int kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action);
 
+static inline int kvm_s390_vcpu_get_memsize(struct kvm_vcpu *vcpu)
+{
+        return vcpu->arch.sie_block->gmslm
+                - vcpu->arch.sie_block->gmsor
+                - VIRTIODESCSPACE + 1ul;
+}
+
+static inline void kvm_s390_vcpu_set_mem(struct kvm_vcpu *vcpu)
+{
+        struct kvm_memory_slot *mem;
+
+        down_read(&vcpu->kvm->slots_lock);
+        mem = &vcpu->kvm->memslots[0];
+
+        vcpu->arch.sie_block->gmsor = mem->userspace_addr;
+        vcpu->arch.sie_block->gmslm =
+                mem->userspace_addr +
+                (mem->npages << PAGE_SHIFT) +
+                VIRTIODESCSPACE - 1ul;
+
+        up_read(&vcpu->kvm->slots_lock);
+}
+
 /* implemented in priv.c */
 int kvm_s390_handle_b2(struct kvm_vcpu *vcpu);
 
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index 21897b0f8a36..40c8c6748cfe 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -189,9 +189,9 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
         /* make sure that the new value is valid memory */
         address = address & 0x7fffe000u;
         if ((copy_from_guest(vcpu, &tmp,
-                (u64) (address + vcpu->kvm->arch.guest_origin) , 1)) ||
+                (u64) (address + vcpu->arch.sie_block->gmsor) , 1)) ||
             (copy_from_guest(vcpu, &tmp, (u64) (address +
-                vcpu->kvm->arch.guest_origin + PAGE_SIZE), 1))) {
+                vcpu->arch.sie_block->gmsor + PAGE_SIZE), 1))) {
                 *reg |= SIGP_STAT_INVALID_PARAMETER;
                 return 1; /* invalid parameter */
         }