diff options
Diffstat (limited to 'arch/s390')
-rw-r--r-- | arch/s390/Kconfig | 14 | ||||
-rw-r--r-- | arch/s390/Makefile | 2 | ||||
-rw-r--r-- | arch/s390/kernel/early.c | 4 | ||||
-rw-r--r-- | arch/s390/kernel/setup.c | 14 | ||||
-rw-r--r-- | arch/s390/kernel/vtime.c | 1 | ||||
-rw-r--r-- | arch/s390/kvm/Kconfig | 46 | ||||
-rw-r--r-- | arch/s390/kvm/Makefile | 14 | ||||
-rw-r--r-- | arch/s390/kvm/diag.c | 67 | ||||
-rw-r--r-- | arch/s390/kvm/gaccess.h | 274 | ||||
-rw-r--r-- | arch/s390/kvm/intercept.c | 216 | ||||
-rw-r--r-- | arch/s390/kvm/interrupt.c | 592 | ||||
-rw-r--r-- | arch/s390/kvm/kvm-s390.c | 685 | ||||
-rw-r--r-- | arch/s390/kvm/kvm-s390.h | 64 | ||||
-rw-r--r-- | arch/s390/kvm/priv.c | 323 | ||||
-rw-r--r-- | arch/s390/kvm/sie64a.S | 47 | ||||
-rw-r--r-- | arch/s390/kvm/sigp.c | 288 | ||||
-rw-r--r-- | arch/s390/mm/pgtable.c | 65 |
17 files changed, 2709 insertions, 7 deletions
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index f6a68e178fc5..8f5f02160ffc 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig | |||
@@ -62,6 +62,10 @@ config GENERIC_LOCKBREAK | |||
62 | default y | 62 | default y |
63 | depends on SMP && PREEMPT | 63 | depends on SMP && PREEMPT |
64 | 64 | ||
65 | config PGSTE | ||
66 | bool | ||
67 | default y if KVM | ||
68 | |||
65 | mainmenu "Linux Kernel Configuration" | 69 | mainmenu "Linux Kernel Configuration" |
66 | 70 | ||
67 | config S390 | 71 | config S390 |
@@ -69,6 +73,7 @@ config S390 | |||
69 | select HAVE_OPROFILE | 73 | select HAVE_OPROFILE |
70 | select HAVE_KPROBES | 74 | select HAVE_KPROBES |
71 | select HAVE_KRETPROBES | 75 | select HAVE_KRETPROBES |
76 | select HAVE_KVM if 64BIT | ||
72 | 77 | ||
73 | source "init/Kconfig" | 78 | source "init/Kconfig" |
74 | 79 | ||
@@ -515,6 +520,13 @@ config ZFCPDUMP | |||
515 | Select this option if you want to build an zfcpdump enabled kernel. | 520 | Select this option if you want to build an zfcpdump enabled kernel. |
516 | Refer to <file:Documentation/s390/zfcpdump.txt> for more details on this. | 521 | Refer to <file:Documentation/s390/zfcpdump.txt> for more details on this. |
517 | 522 | ||
523 | config S390_GUEST | ||
524 | bool "s390 guest support (EXPERIMENTAL)" | ||
525 | depends on 64BIT && EXPERIMENTAL | ||
526 | select VIRTIO | ||
527 | select VIRTIO_RING | ||
528 | help | ||
529 | Select this option if you want to run the kernel under s390 linux | ||
518 | endmenu | 530 | endmenu |
519 | 531 | ||
520 | source "net/Kconfig" | 532 | source "net/Kconfig" |
@@ -536,3 +548,5 @@ source "security/Kconfig" | |||
536 | source "crypto/Kconfig" | 548 | source "crypto/Kconfig" |
537 | 549 | ||
538 | source "lib/Kconfig" | 550 | source "lib/Kconfig" |
551 | |||
552 | source "arch/s390/kvm/Kconfig" | ||
diff --git a/arch/s390/Makefile b/arch/s390/Makefile index f708be367b03..792a4e7743ce 100644 --- a/arch/s390/Makefile +++ b/arch/s390/Makefile | |||
@@ -87,7 +87,7 @@ LDFLAGS_vmlinux := -e start | |||
87 | head-y := arch/s390/kernel/head.o arch/s390/kernel/init_task.o | 87 | head-y := arch/s390/kernel/head.o arch/s390/kernel/init_task.o |
88 | 88 | ||
89 | core-y += arch/s390/mm/ arch/s390/kernel/ arch/s390/crypto/ \ | 89 | core-y += arch/s390/mm/ arch/s390/kernel/ arch/s390/crypto/ \ |
90 | arch/s390/appldata/ arch/s390/hypfs/ | 90 | arch/s390/appldata/ arch/s390/hypfs/ arch/s390/kvm/ |
91 | libs-y += arch/s390/lib/ | 91 | libs-y += arch/s390/lib/ |
92 | drivers-y += drivers/s390/ | 92 | drivers-y += drivers/s390/ |
93 | drivers-$(CONFIG_MATHEMU) += arch/s390/math-emu/ | 93 | drivers-$(CONFIG_MATHEMU) += arch/s390/math-emu/ |
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index 540a67f979b6..68ec4083bf73 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c | |||
@@ -144,6 +144,10 @@ static noinline __init void detect_machine_type(void) | |||
144 | /* Running on a P/390 ? */ | 144 | /* Running on a P/390 ? */ |
145 | if (cpuinfo->cpu_id.machine == 0x7490) | 145 | if (cpuinfo->cpu_id.machine == 0x7490) |
146 | machine_flags |= 4; | 146 | machine_flags |= 4; |
147 | |||
148 | /* Running under KVM ? */ | ||
149 | if (cpuinfo->cpu_id.version == 0xfe) | ||
150 | machine_flags |= 64; | ||
147 | } | 151 | } |
148 | 152 | ||
149 | #ifdef CONFIG_64BIT | 153 | #ifdef CONFIG_64BIT |
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 7141147e6b63..a9d18aafa5f4 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c | |||
@@ -316,7 +316,11 @@ static int __init early_parse_ipldelay(char *p) | |||
316 | early_param("ipldelay", early_parse_ipldelay); | 316 | early_param("ipldelay", early_parse_ipldelay); |
317 | 317 | ||
318 | #ifdef CONFIG_S390_SWITCH_AMODE | 318 | #ifdef CONFIG_S390_SWITCH_AMODE |
319 | #ifdef CONFIG_PGSTE | ||
320 | unsigned int switch_amode = 1; | ||
321 | #else | ||
319 | unsigned int switch_amode = 0; | 322 | unsigned int switch_amode = 0; |
323 | #endif | ||
320 | EXPORT_SYMBOL_GPL(switch_amode); | 324 | EXPORT_SYMBOL_GPL(switch_amode); |
321 | 325 | ||
322 | static void set_amode_and_uaccess(unsigned long user_amode, | 326 | static void set_amode_and_uaccess(unsigned long user_amode, |
@@ -797,9 +801,13 @@ setup_arch(char **cmdline_p) | |||
797 | "This machine has an IEEE fpu\n" : | 801 | "This machine has an IEEE fpu\n" : |
798 | "This machine has no IEEE fpu\n"); | 802 | "This machine has no IEEE fpu\n"); |
799 | #else /* CONFIG_64BIT */ | 803 | #else /* CONFIG_64BIT */ |
800 | printk((MACHINE_IS_VM) ? | 804 | if (MACHINE_IS_VM) |
801 | "We are running under VM (64 bit mode)\n" : | 805 | printk("We are running under VM (64 bit mode)\n"); |
802 | "We are running native (64 bit mode)\n"); | 806 | else if (MACHINE_IS_KVM) { |
807 | printk("We are running under KVM (64 bit mode)\n"); | ||
808 | add_preferred_console("ttyS", 1, NULL); | ||
809 | } else | ||
810 | printk("We are running native (64 bit mode)\n"); | ||
803 | #endif /* CONFIG_64BIT */ | 811 | #endif /* CONFIG_64BIT */ |
804 | 812 | ||
805 | /* Save unparsed command line copy for /proc/cmdline */ | 813 | /* Save unparsed command line copy for /proc/cmdline */ |
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index c5f05b3fb2c3..ca90ee3f930e 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c | |||
@@ -110,6 +110,7 @@ void account_system_vtime(struct task_struct *tsk) | |||
110 | S390_lowcore.steal_clock -= cputime << 12; | 110 | S390_lowcore.steal_clock -= cputime << 12; |
111 | account_system_time(tsk, 0, cputime); | 111 | account_system_time(tsk, 0, cputime); |
112 | } | 112 | } |
113 | EXPORT_SYMBOL_GPL(account_system_vtime); | ||
113 | 114 | ||
114 | static inline void set_vtimer(__u64 expires) | 115 | static inline void set_vtimer(__u64 expires) |
115 | { | 116 | { |
diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig new file mode 100644 index 000000000000..1761b74d639b --- /dev/null +++ b/arch/s390/kvm/Kconfig | |||
@@ -0,0 +1,46 @@ | |||
1 | # | ||
2 | # KVM configuration | ||
3 | # | ||
4 | config HAVE_KVM | ||
5 | bool | ||
6 | |||
7 | menuconfig VIRTUALIZATION | ||
8 | bool "Virtualization" | ||
9 | default y | ||
10 | ---help--- | ||
11 | Say Y here to get to see options for using your Linux host to run other | ||
12 | operating systems inside virtual machines (guests). | ||
13 | This option alone does not add any kernel code. | ||
14 | |||
15 | If you say N, all options in this submenu will be skipped and disabled. | ||
16 | |||
17 | if VIRTUALIZATION | ||
18 | |||
19 | config KVM | ||
20 | tristate "Kernel-based Virtual Machine (KVM) support" | ||
21 | depends on HAVE_KVM && EXPERIMENTAL | ||
22 | select PREEMPT_NOTIFIERS | ||
23 | select ANON_INODES | ||
24 | select S390_SWITCH_AMODE | ||
25 | select PREEMPT | ||
26 | ---help--- | ||
27 | Support hosting paravirtualized guest machines using the SIE | ||
28 | virtualization capability on the mainframe. This should work | ||
29 | on any 64bit machine. | ||
30 | |||
31 | This module provides access to the hardware capabilities through | ||
32 | a character device node named /dev/kvm. | ||
33 | |||
34 | To compile this as a module, choose M here: the module | ||
35 | will be called kvm. | ||
36 | |||
37 | If unsure, say N. | ||
38 | |||
39 | config KVM_TRACE | ||
40 | bool | ||
41 | |||
42 | # OK, it's a little counter-intuitive to do this, but it puts it neatly under | ||
43 | # the virtualization menu. | ||
44 | source drivers/virtio/Kconfig | ||
45 | |||
46 | endif # VIRTUALIZATION | ||
diff --git a/arch/s390/kvm/Makefile b/arch/s390/kvm/Makefile new file mode 100644 index 000000000000..e5221ec0b8e3 --- /dev/null +++ b/arch/s390/kvm/Makefile | |||
@@ -0,0 +1,14 @@ | |||
1 | # Makefile for kernel virtual machines on s390 | ||
2 | # | ||
3 | # Copyright IBM Corp. 2008 | ||
4 | # | ||
5 | # This program is free software; you can redistribute it and/or modify | ||
6 | # it under the terms of the GNU General Public License (version 2 only) | ||
7 | # as published by the Free Software Foundation. | ||
8 | |||
9 | common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o) | ||
10 | |||
11 | EXTRA_CFLAGS += -Ivirt/kvm -Iarch/s390/kvm | ||
12 | |||
13 | kvm-objs := $(common-objs) kvm-s390.o sie64a.o intercept.o interrupt.o priv.o sigp.o diag.o | ||
14 | obj-$(CONFIG_KVM) += kvm.o | ||
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c new file mode 100644 index 000000000000..f639a152869f --- /dev/null +++ b/arch/s390/kvm/diag.c | |||
@@ -0,0 +1,67 @@ | |||
1 | /* | ||
2 | * diag.c - handling diagnose instructions | ||
3 | * | ||
4 | * Copyright IBM Corp. 2008 | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License (version 2 only) | ||
8 | * as published by the Free Software Foundation. | ||
9 | * | ||
10 | * Author(s): Carsten Otte <cotte@de.ibm.com> | ||
11 | * Christian Borntraeger <borntraeger@de.ibm.com> | ||
12 | */ | ||
13 | |||
14 | #include <linux/kvm.h> | ||
15 | #include <linux/kvm_host.h> | ||
16 | #include "kvm-s390.h" | ||
17 | |||
18 | static int __diag_time_slice_end(struct kvm_vcpu *vcpu) | ||
19 | { | ||
20 | VCPU_EVENT(vcpu, 5, "%s", "diag time slice end"); | ||
21 | vcpu->stat.diagnose_44++; | ||
22 | vcpu_put(vcpu); | ||
23 | schedule(); | ||
24 | vcpu_load(vcpu); | ||
25 | return 0; | ||
26 | } | ||
27 | |||
28 | static int __diag_ipl_functions(struct kvm_vcpu *vcpu) | ||
29 | { | ||
30 | unsigned int reg = vcpu->arch.sie_block->ipa & 0xf; | ||
31 | unsigned long subcode = vcpu->arch.guest_gprs[reg] & 0xffff; | ||
32 | |||
33 | VCPU_EVENT(vcpu, 5, "diag ipl functions, subcode %lx", subcode); | ||
34 | switch (subcode) { | ||
35 | case 3: | ||
36 | vcpu->run->s390_reset_flags = KVM_S390_RESET_CLEAR; | ||
37 | break; | ||
38 | case 4: | ||
39 | vcpu->run->s390_reset_flags = 0; | ||
40 | break; | ||
41 | default: | ||
42 | return -ENOTSUPP; | ||
43 | } | ||
44 | |||
45 | atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); | ||
46 | vcpu->run->s390_reset_flags |= KVM_S390_RESET_SUBSYSTEM; | ||
47 | vcpu->run->s390_reset_flags |= KVM_S390_RESET_IPL; | ||
48 | vcpu->run->s390_reset_flags |= KVM_S390_RESET_CPU_INIT; | ||
49 | vcpu->run->exit_reason = KVM_EXIT_S390_RESET; | ||
50 | VCPU_EVENT(vcpu, 3, "requesting userspace resets %lx", | ||
51 | vcpu->run->s390_reset_flags); | ||
52 | return -EREMOTE; | ||
53 | } | ||
54 | |||
55 | int kvm_s390_handle_diag(struct kvm_vcpu *vcpu) | ||
56 | { | ||
57 | int code = (vcpu->arch.sie_block->ipb & 0xfff0000) >> 16; | ||
58 | |||
59 | switch (code) { | ||
60 | case 0x44: | ||
61 | return __diag_time_slice_end(vcpu); | ||
62 | case 0x308: | ||
63 | return __diag_ipl_functions(vcpu); | ||
64 | default: | ||
65 | return -ENOTSUPP; | ||
66 | } | ||
67 | } | ||
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h new file mode 100644 index 000000000000..4e0633c413f3 --- /dev/null +++ b/arch/s390/kvm/gaccess.h | |||
@@ -0,0 +1,274 @@ | |||
1 | /* | ||
2 | * gaccess.h - access guest memory | ||
3 | * | ||
4 | * Copyright IBM Corp. 2008 | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License (version 2 only) | ||
8 | * as published by the Free Software Foundation. | ||
9 | * | ||
10 | * Author(s): Carsten Otte <cotte@de.ibm.com> | ||
11 | */ | ||
12 | |||
13 | #ifndef __KVM_S390_GACCESS_H | ||
14 | #define __KVM_S390_GACCESS_H | ||
15 | |||
16 | #include <linux/compiler.h> | ||
17 | #include <linux/kvm_host.h> | ||
18 | #include <asm/uaccess.h> | ||
19 | |||
20 | static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu, | ||
21 | u64 guestaddr) | ||
22 | { | ||
23 | u64 prefix = vcpu->arch.sie_block->prefix; | ||
24 | u64 origin = vcpu->kvm->arch.guest_origin; | ||
25 | u64 memsize = vcpu->kvm->arch.guest_memsize; | ||
26 | |||
27 | if (guestaddr < 2 * PAGE_SIZE) | ||
28 | guestaddr += prefix; | ||
29 | else if ((guestaddr >= prefix) && (guestaddr < prefix + 2 * PAGE_SIZE)) | ||
30 | guestaddr -= prefix; | ||
31 | |||
32 | if (guestaddr > memsize) | ||
33 | return (void __user __force *) ERR_PTR(-EFAULT); | ||
34 | |||
35 | guestaddr += origin; | ||
36 | |||
37 | return (void __user *) guestaddr; | ||
38 | } | ||
39 | |||
40 | static inline int get_guest_u64(struct kvm_vcpu *vcpu, u64 guestaddr, | ||
41 | u64 *result) | ||
42 | { | ||
43 | void __user *uptr = __guestaddr_to_user(vcpu, guestaddr); | ||
44 | |||
45 | BUG_ON(guestaddr & 7); | ||
46 | |||
47 | if (IS_ERR((void __force *) uptr)) | ||
48 | return PTR_ERR((void __force *) uptr); | ||
49 | |||
50 | return get_user(*result, (u64 __user *) uptr); | ||
51 | } | ||
52 | |||
53 | static inline int get_guest_u32(struct kvm_vcpu *vcpu, u64 guestaddr, | ||
54 | u32 *result) | ||
55 | { | ||
56 | void __user *uptr = __guestaddr_to_user(vcpu, guestaddr); | ||
57 | |||
58 | BUG_ON(guestaddr & 3); | ||
59 | |||
60 | if (IS_ERR((void __force *) uptr)) | ||
61 | return PTR_ERR((void __force *) uptr); | ||
62 | |||
63 | return get_user(*result, (u32 __user *) uptr); | ||
64 | } | ||
65 | |||
66 | static inline int get_guest_u16(struct kvm_vcpu *vcpu, u64 guestaddr, | ||
67 | u16 *result) | ||
68 | { | ||
69 | void __user *uptr = __guestaddr_to_user(vcpu, guestaddr); | ||
70 | |||
71 | BUG_ON(guestaddr & 1); | ||
72 | |||
73 | if (IS_ERR(uptr)) | ||
74 | return PTR_ERR(uptr); | ||
75 | |||
76 | return get_user(*result, (u16 __user *) uptr); | ||
77 | } | ||
78 | |||
79 | static inline int get_guest_u8(struct kvm_vcpu *vcpu, u64 guestaddr, | ||
80 | u8 *result) | ||
81 | { | ||
82 | void __user *uptr = __guestaddr_to_user(vcpu, guestaddr); | ||
83 | |||
84 | if (IS_ERR((void __force *) uptr)) | ||
85 | return PTR_ERR((void __force *) uptr); | ||
86 | |||
87 | return get_user(*result, (u8 __user *) uptr); | ||
88 | } | ||
89 | |||
90 | static inline int put_guest_u64(struct kvm_vcpu *vcpu, u64 guestaddr, | ||
91 | u64 value) | ||
92 | { | ||
93 | void __user *uptr = __guestaddr_to_user(vcpu, guestaddr); | ||
94 | |||
95 | BUG_ON(guestaddr & 7); | ||
96 | |||
97 | if (IS_ERR((void __force *) uptr)) | ||
98 | return PTR_ERR((void __force *) uptr); | ||
99 | |||
100 | return put_user(value, (u64 __user *) uptr); | ||
101 | } | ||
102 | |||
103 | static inline int put_guest_u32(struct kvm_vcpu *vcpu, u64 guestaddr, | ||
104 | u32 value) | ||
105 | { | ||
106 | void __user *uptr = __guestaddr_to_user(vcpu, guestaddr); | ||
107 | |||
108 | BUG_ON(guestaddr & 3); | ||
109 | |||
110 | if (IS_ERR((void __force *) uptr)) | ||
111 | return PTR_ERR((void __force *) uptr); | ||
112 | |||
113 | return put_user(value, (u32 __user *) uptr); | ||
114 | } | ||
115 | |||
116 | static inline int put_guest_u16(struct kvm_vcpu *vcpu, u64 guestaddr, | ||
117 | u16 value) | ||
118 | { | ||
119 | void __user *uptr = __guestaddr_to_user(vcpu, guestaddr); | ||
120 | |||
121 | BUG_ON(guestaddr & 1); | ||
122 | |||
123 | if (IS_ERR((void __force *) uptr)) | ||
124 | return PTR_ERR((void __force *) uptr); | ||
125 | |||
126 | return put_user(value, (u16 __user *) uptr); | ||
127 | } | ||
128 | |||
129 | static inline int put_guest_u8(struct kvm_vcpu *vcpu, u64 guestaddr, | ||
130 | u8 value) | ||
131 | { | ||
132 | void __user *uptr = __guestaddr_to_user(vcpu, guestaddr); | ||
133 | |||
134 | if (IS_ERR((void __force *) uptr)) | ||
135 | return PTR_ERR((void __force *) uptr); | ||
136 | |||
137 | return put_user(value, (u8 __user *) uptr); | ||
138 | } | ||
139 | |||
140 | |||
141 | static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu, u64 guestdest, | ||
142 | const void *from, unsigned long n) | ||
143 | { | ||
144 | int rc; | ||
145 | unsigned long i; | ||
146 | const u8 *data = from; | ||
147 | |||
148 | for (i = 0; i < n; i++) { | ||
149 | rc = put_guest_u8(vcpu, guestdest++, *(data++)); | ||
150 | if (rc < 0) | ||
151 | return rc; | ||
152 | } | ||
153 | return 0; | ||
154 | } | ||
155 | |||
156 | static inline int copy_to_guest(struct kvm_vcpu *vcpu, u64 guestdest, | ||
157 | const void *from, unsigned long n) | ||
158 | { | ||
159 | u64 prefix = vcpu->arch.sie_block->prefix; | ||
160 | u64 origin = vcpu->kvm->arch.guest_origin; | ||
161 | u64 memsize = vcpu->kvm->arch.guest_memsize; | ||
162 | |||
163 | if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE)) | ||
164 | goto slowpath; | ||
165 | |||
166 | if ((guestdest < prefix) && (guestdest + n > prefix)) | ||
167 | goto slowpath; | ||
168 | |||
169 | if ((guestdest < prefix + 2 * PAGE_SIZE) | ||
170 | && (guestdest + n > prefix + 2 * PAGE_SIZE)) | ||
171 | goto slowpath; | ||
172 | |||
173 | if (guestdest < 2 * PAGE_SIZE) | ||
174 | guestdest += prefix; | ||
175 | else if ((guestdest >= prefix) && (guestdest < prefix + 2 * PAGE_SIZE)) | ||
176 | guestdest -= prefix; | ||
177 | |||
178 | if (guestdest + n > memsize) | ||
179 | return -EFAULT; | ||
180 | |||
181 | if (guestdest + n < guestdest) | ||
182 | return -EFAULT; | ||
183 | |||
184 | guestdest += origin; | ||
185 | |||
186 | return copy_to_user((void __user *) guestdest, from, n); | ||
187 | slowpath: | ||
188 | return __copy_to_guest_slow(vcpu, guestdest, from, n); | ||
189 | } | ||
190 | |||
191 | static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to, | ||
192 | u64 guestsrc, unsigned long n) | ||
193 | { | ||
194 | int rc; | ||
195 | unsigned long i; | ||
196 | u8 *data = to; | ||
197 | |||
198 | for (i = 0; i < n; i++) { | ||
199 | rc = get_guest_u8(vcpu, guestsrc++, data++); | ||
200 | if (rc < 0) | ||
201 | return rc; | ||
202 | } | ||
203 | return 0; | ||
204 | } | ||
205 | |||
206 | static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to, | ||
207 | u64 guestsrc, unsigned long n) | ||
208 | { | ||
209 | u64 prefix = vcpu->arch.sie_block->prefix; | ||
210 | u64 origin = vcpu->kvm->arch.guest_origin; | ||
211 | u64 memsize = vcpu->kvm->arch.guest_memsize; | ||
212 | |||
213 | if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE)) | ||
214 | goto slowpath; | ||
215 | |||
216 | if ((guestsrc < prefix) && (guestsrc + n > prefix)) | ||
217 | goto slowpath; | ||
218 | |||
219 | if ((guestsrc < prefix + 2 * PAGE_SIZE) | ||
220 | && (guestsrc + n > prefix + 2 * PAGE_SIZE)) | ||
221 | goto slowpath; | ||
222 | |||
223 | if (guestsrc < 2 * PAGE_SIZE) | ||
224 | guestsrc += prefix; | ||
225 | else if ((guestsrc >= prefix) && (guestsrc < prefix + 2 * PAGE_SIZE)) | ||
226 | guestsrc -= prefix; | ||
227 | |||
228 | if (guestsrc + n > memsize) | ||
229 | return -EFAULT; | ||
230 | |||
231 | if (guestsrc + n < guestsrc) | ||
232 | return -EFAULT; | ||
233 | |||
234 | guestsrc += origin; | ||
235 | |||
236 | return copy_from_user(to, (void __user *) guestsrc, n); | ||
237 | slowpath: | ||
238 | return __copy_from_guest_slow(vcpu, to, guestsrc, n); | ||
239 | } | ||
240 | |||
241 | static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu, u64 guestdest, | ||
242 | const void *from, unsigned long n) | ||
243 | { | ||
244 | u64 origin = vcpu->kvm->arch.guest_origin; | ||
245 | u64 memsize = vcpu->kvm->arch.guest_memsize; | ||
246 | |||
247 | if (guestdest + n > memsize) | ||
248 | return -EFAULT; | ||
249 | |||
250 | if (guestdest + n < guestdest) | ||
251 | return -EFAULT; | ||
252 | |||
253 | guestdest += origin; | ||
254 | |||
255 | return copy_to_user((void __user *) guestdest, from, n); | ||
256 | } | ||
257 | |||
258 | static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to, | ||
259 | u64 guestsrc, unsigned long n) | ||
260 | { | ||
261 | u64 origin = vcpu->kvm->arch.guest_origin; | ||
262 | u64 memsize = vcpu->kvm->arch.guest_memsize; | ||
263 | |||
264 | if (guestsrc + n > memsize) | ||
265 | return -EFAULT; | ||
266 | |||
267 | if (guestsrc + n < guestsrc) | ||
268 | return -EFAULT; | ||
269 | |||
270 | guestsrc += origin; | ||
271 | |||
272 | return copy_from_user(to, (void __user *) guestsrc, n); | ||
273 | } | ||
274 | #endif | ||
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c new file mode 100644 index 000000000000..349581a26103 --- /dev/null +++ b/arch/s390/kvm/intercept.c | |||
@@ -0,0 +1,216 @@ | |||
1 | /* | ||
2 | * intercept.c - in-kernel handling for sie intercepts | ||
3 | * | ||
4 | * Copyright IBM Corp. 2008 | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License (version 2 only) | ||
8 | * as published by the Free Software Foundation. | ||
9 | * | ||
10 | * Author(s): Carsten Otte <cotte@de.ibm.com> | ||
11 | * Christian Borntraeger <borntraeger@de.ibm.com> | ||
12 | */ | ||
13 | |||
14 | #include <linux/kvm_host.h> | ||
15 | #include <linux/errno.h> | ||
16 | #include <linux/pagemap.h> | ||
17 | |||
18 | #include <asm/kvm_host.h> | ||
19 | |||
20 | #include "kvm-s390.h" | ||
21 | #include "gaccess.h" | ||
22 | |||
23 | static int handle_lctg(struct kvm_vcpu *vcpu) | ||
24 | { | ||
25 | int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; | ||
26 | int reg3 = vcpu->arch.sie_block->ipa & 0x000f; | ||
27 | int base2 = vcpu->arch.sie_block->ipb >> 28; | ||
28 | int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) + | ||
29 | ((vcpu->arch.sie_block->ipb & 0xff00) << 4); | ||
30 | u64 useraddr; | ||
31 | int reg, rc; | ||
32 | |||
33 | vcpu->stat.instruction_lctg++; | ||
34 | if ((vcpu->arch.sie_block->ipb & 0xff) != 0x2f) | ||
35 | return -ENOTSUPP; | ||
36 | |||
37 | useraddr = disp2; | ||
38 | if (base2) | ||
39 | useraddr += vcpu->arch.guest_gprs[base2]; | ||
40 | |||
41 | reg = reg1; | ||
42 | |||
43 | VCPU_EVENT(vcpu, 5, "lctg r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2, | ||
44 | disp2); | ||
45 | |||
46 | do { | ||
47 | rc = get_guest_u64(vcpu, useraddr, | ||
48 | &vcpu->arch.sie_block->gcr[reg]); | ||
49 | if (rc == -EFAULT) { | ||
50 | kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); | ||
51 | break; | ||
52 | } | ||
53 | useraddr += 8; | ||
54 | if (reg == reg3) | ||
55 | break; | ||
56 | reg = (reg + 1) % 16; | ||
57 | } while (1); | ||
58 | return 0; | ||
59 | } | ||
60 | |||
61 | static int handle_lctl(struct kvm_vcpu *vcpu) | ||
62 | { | ||
63 | int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; | ||
64 | int reg3 = vcpu->arch.sie_block->ipa & 0x000f; | ||
65 | int base2 = vcpu->arch.sie_block->ipb >> 28; | ||
66 | int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16); | ||
67 | u64 useraddr; | ||
68 | u32 val = 0; | ||
69 | int reg, rc; | ||
70 | |||
71 | vcpu->stat.instruction_lctl++; | ||
72 | |||
73 | useraddr = disp2; | ||
74 | if (base2) | ||
75 | useraddr += vcpu->arch.guest_gprs[base2]; | ||
76 | |||
77 | VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2, | ||
78 | disp2); | ||
79 | |||
80 | reg = reg1; | ||
81 | do { | ||
82 | rc = get_guest_u32(vcpu, useraddr, &val); | ||
83 | if (rc == -EFAULT) { | ||
84 | kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); | ||
85 | break; | ||
86 | } | ||
87 | vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul; | ||
88 | vcpu->arch.sie_block->gcr[reg] |= val; | ||
89 | useraddr += 4; | ||
90 | if (reg == reg3) | ||
91 | break; | ||
92 | reg = (reg + 1) % 16; | ||
93 | } while (1); | ||
94 | return 0; | ||
95 | } | ||
96 | |||
97 | static intercept_handler_t instruction_handlers[256] = { | ||
98 | [0x83] = kvm_s390_handle_diag, | ||
99 | [0xae] = kvm_s390_handle_sigp, | ||
100 | [0xb2] = kvm_s390_handle_priv, | ||
101 | [0xb7] = handle_lctl, | ||
102 | [0xeb] = handle_lctg, | ||
103 | }; | ||
104 | |||
105 | static int handle_noop(struct kvm_vcpu *vcpu) | ||
106 | { | ||
107 | switch (vcpu->arch.sie_block->icptcode) { | ||
108 | case 0x10: | ||
109 | vcpu->stat.exit_external_request++; | ||
110 | break; | ||
111 | case 0x14: | ||
112 | vcpu->stat.exit_external_interrupt++; | ||
113 | break; | ||
114 | default: | ||
115 | break; /* nothing */ | ||
116 | } | ||
117 | return 0; | ||
118 | } | ||
119 | |||
120 | static int handle_stop(struct kvm_vcpu *vcpu) | ||
121 | { | ||
122 | int rc; | ||
123 | |||
124 | vcpu->stat.exit_stop_request++; | ||
125 | atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); | ||
126 | spin_lock_bh(&vcpu->arch.local_int.lock); | ||
127 | if (vcpu->arch.local_int.action_bits & ACTION_STORE_ON_STOP) { | ||
128 | vcpu->arch.local_int.action_bits &= ~ACTION_STORE_ON_STOP; | ||
129 | rc = __kvm_s390_vcpu_store_status(vcpu, | ||
130 | KVM_S390_STORE_STATUS_NOADDR); | ||
131 | if (rc >= 0) | ||
132 | rc = -ENOTSUPP; | ||
133 | } | ||
134 | |||
135 | if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) { | ||
136 | vcpu->arch.local_int.action_bits &= ~ACTION_STOP_ON_STOP; | ||
137 | VCPU_EVENT(vcpu, 3, "%s", "cpu stopped"); | ||
138 | rc = -ENOTSUPP; | ||
139 | } else | ||
140 | rc = 0; | ||
141 | spin_unlock_bh(&vcpu->arch.local_int.lock); | ||
142 | return rc; | ||
143 | } | ||
144 | |||
145 | static int handle_validity(struct kvm_vcpu *vcpu) | ||
146 | { | ||
147 | int viwhy = vcpu->arch.sie_block->ipb >> 16; | ||
148 | vcpu->stat.exit_validity++; | ||
149 | if (viwhy == 0x37) { | ||
150 | fault_in_pages_writeable((char __user *) | ||
151 | vcpu->kvm->arch.guest_origin + | ||
152 | vcpu->arch.sie_block->prefix, | ||
153 | PAGE_SIZE); | ||
154 | return 0; | ||
155 | } | ||
156 | VCPU_EVENT(vcpu, 2, "unhandled validity intercept code %d", | ||
157 | viwhy); | ||
158 | return -ENOTSUPP; | ||
159 | } | ||
160 | |||
161 | static int handle_instruction(struct kvm_vcpu *vcpu) | ||
162 | { | ||
163 | intercept_handler_t handler; | ||
164 | |||
165 | vcpu->stat.exit_instruction++; | ||
166 | handler = instruction_handlers[vcpu->arch.sie_block->ipa >> 8]; | ||
167 | if (handler) | ||
168 | return handler(vcpu); | ||
169 | return -ENOTSUPP; | ||
170 | } | ||
171 | |||
172 | static int handle_prog(struct kvm_vcpu *vcpu) | ||
173 | { | ||
174 | vcpu->stat.exit_program_interruption++; | ||
175 | return kvm_s390_inject_program_int(vcpu, vcpu->arch.sie_block->iprcc); | ||
176 | } | ||
177 | |||
178 | static int handle_instruction_and_prog(struct kvm_vcpu *vcpu) | ||
179 | { | ||
180 | int rc, rc2; | ||
181 | |||
182 | vcpu->stat.exit_instr_and_program++; | ||
183 | rc = handle_instruction(vcpu); | ||
184 | rc2 = handle_prog(vcpu); | ||
185 | |||
186 | if (rc == -ENOTSUPP) | ||
187 | vcpu->arch.sie_block->icptcode = 0x04; | ||
188 | if (rc) | ||
189 | return rc; | ||
190 | return rc2; | ||
191 | } | ||
192 | |||
193 | static const intercept_handler_t intercept_funcs[0x48 >> 2] = { | ||
194 | [0x00 >> 2] = handle_noop, | ||
195 | [0x04 >> 2] = handle_instruction, | ||
196 | [0x08 >> 2] = handle_prog, | ||
197 | [0x0C >> 2] = handle_instruction_and_prog, | ||
198 | [0x10 >> 2] = handle_noop, | ||
199 | [0x14 >> 2] = handle_noop, | ||
200 | [0x1C >> 2] = kvm_s390_handle_wait, | ||
201 | [0x20 >> 2] = handle_validity, | ||
202 | [0x28 >> 2] = handle_stop, | ||
203 | }; | ||
204 | |||
205 | int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu) | ||
206 | { | ||
207 | intercept_handler_t func; | ||
208 | u8 code = vcpu->arch.sie_block->icptcode; | ||
209 | |||
210 | if (code & 3 || code > 0x48) | ||
211 | return -ENOTSUPP; | ||
212 | func = intercept_funcs[code >> 2]; | ||
213 | if (func) | ||
214 | return func(vcpu); | ||
215 | return -ENOTSUPP; | ||
216 | } | ||
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c new file mode 100644 index 000000000000..fcd1ed8015c1 --- /dev/null +++ b/arch/s390/kvm/interrupt.c | |||
@@ -0,0 +1,592 @@ | |||
1 | /* | ||
2 | * interrupt.c - handling kvm guest interrupts | ||
3 | * | ||
4 | * Copyright IBM Corp. 2008 | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License (version 2 only) | ||
8 | * as published by the Free Software Foundation. | ||
9 | * | ||
10 | * Author(s): Carsten Otte <cotte@de.ibm.com> | ||
11 | */ | ||
12 | |||
13 | #include <asm/lowcore.h> | ||
14 | #include <asm/uaccess.h> | ||
15 | #include <linux/kvm_host.h> | ||
16 | #include "kvm-s390.h" | ||
17 | #include "gaccess.h" | ||
18 | |||
/* True if the guest PSW currently masks external interrupts. */
static int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}
23 | |||
24 | static int psw_interrupts_disabled(struct kvm_vcpu *vcpu) | ||
25 | { | ||
26 | if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) || | ||
27 | (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) || | ||
28 | (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT)) | ||
29 | return 0; | ||
30 | return 1; | ||
31 | } | ||
32 | |||
33 | static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu, | ||
34 | struct interrupt_info *inti) | ||
35 | { | ||
36 | switch (inti->type) { | ||
37 | case KVM_S390_INT_EMERGENCY: | ||
38 | if (psw_extint_disabled(vcpu)) | ||
39 | return 0; | ||
40 | if (vcpu->arch.sie_block->gcr[0] & 0x4000ul) | ||
41 | return 1; | ||
42 | return 0; | ||
43 | case KVM_S390_INT_SERVICE: | ||
44 | if (psw_extint_disabled(vcpu)) | ||
45 | return 0; | ||
46 | if (vcpu->arch.sie_block->gcr[0] & 0x200ul) | ||
47 | return 1; | ||
48 | return 0; | ||
49 | case KVM_S390_INT_VIRTIO: | ||
50 | if (psw_extint_disabled(vcpu)) | ||
51 | return 0; | ||
52 | if (vcpu->arch.sie_block->gcr[0] & 0x200ul) | ||
53 | return 1; | ||
54 | return 0; | ||
55 | case KVM_S390_PROGRAM_INT: | ||
56 | case KVM_S390_SIGP_STOP: | ||
57 | case KVM_S390_SIGP_SET_PREFIX: | ||
58 | case KVM_S390_RESTART: | ||
59 | return 1; | ||
60 | default: | ||
61 | BUG(); | ||
62 | } | ||
63 | return 0; | ||
64 | } | ||
65 | |||
/* Mark the vcpu idle: SIE wait flag plus the vm-wide idle_mask bit. */
static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
	BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1);
	atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}
72 | |||
/* Undo __set_cpu_idle(): clear the wait flag and the idle_mask bit. */
static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
	BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1);
	atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}
79 | |||
/*
 * Clear all interrupt-related intercept requests and control-register
 * interception; __set_intercept_indicator() re-arms what is still
 * needed for interrupts that remain pending.
 */
static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_ECALL_PEND |
		CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
		&vcpu->arch.sie_block->cpuflags);
	vcpu->arch.sie_block->lctl = 0x0000;
}
87 | |||
/* Atomically set a flag bit in the SIE control block cpuflags word. */
static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
{
	atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
}
92 | |||
93 | static void __set_intercept_indicator(struct kvm_vcpu *vcpu, | ||
94 | struct interrupt_info *inti) | ||
95 | { | ||
96 | switch (inti->type) { | ||
97 | case KVM_S390_INT_EMERGENCY: | ||
98 | case KVM_S390_INT_SERVICE: | ||
99 | case KVM_S390_INT_VIRTIO: | ||
100 | if (psw_extint_disabled(vcpu)) | ||
101 | __set_cpuflag(vcpu, CPUSTAT_EXT_INT); | ||
102 | else | ||
103 | vcpu->arch.sie_block->lctl |= LCTL_CR0; | ||
104 | break; | ||
105 | case KVM_S390_SIGP_STOP: | ||
106 | __set_cpuflag(vcpu, CPUSTAT_STOP_INT); | ||
107 | break; | ||
108 | default: | ||
109 | BUG(); | ||
110 | } | ||
111 | } | ||
112 | |||
/*
 * Deliver one interrupt to the guest: store the interruption code and
 * parameters into guest lowcore, save the current PSW as the class's
 * old PSW and load the new PSW.  Any -EFAULT from a lowcore access
 * (e.g. an unmapped prefix page) is converted into an addressing
 * program check; faulting while delivering a program check would
 * recurse forever, hence the BUG() below.
 */
static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
				   struct interrupt_info *inti)
{
	/* instruction length in bytes, indexed by the two ilc bits of ipa */
	const unsigned short table[] = { 2, 4, 4, 6 };
	int rc, exception = 0;

	switch (inti->type) {
	case KVM_S390_INT_EMERGENCY:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
		vcpu->stat.deliver_emergency_signal++;
		/* external interruption code 0x1201: emergency signal */
		rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1201);
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
			 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
			 __LC_EXT_NEW_PSW, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;
		break;

	case KVM_S390_INT_SERVICE:
		VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
			   inti->ext.ext_params);
		vcpu->stat.deliver_service_signal++;
		/* external interruption code 0x2401: service signal */
		rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2401);
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
			 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
			 __LC_EXT_NEW_PSW, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
		if (rc == -EFAULT)
			exception = 1;
		break;

	case KVM_S390_INT_VIRTIO:
		VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%lx",
			   inti->ext.ext_params, inti->ext.ext_params2);
		vcpu->stat.deliver_virtio_interrupt++;
		/* external interruption code 0x2603, cpu address 0x0d00 */
		rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2603);
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u16(vcpu, __LC_CPU_ADDRESS, 0x0d00);
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
			 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
			 __LC_EXT_NEW_PSW, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
		if (rc == -EFAULT)
			exception = 1;

		/* 64 bit virtio parameter goes into the pfault parm slot */
		rc = put_guest_u64(vcpu, __LC_PFAULT_INTPARM,
				   inti->ext.ext_params2);
		if (rc == -EFAULT)
			exception = 1;
		break;

	case KVM_S390_SIGP_STOP:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
		vcpu->stat.deliver_stop_signal++;
		/* stop is not delivered via lowcore; raise a stop intercept */
		__set_intercept_indicator(vcpu, inti);
		break;

	case KVM_S390_SIGP_SET_PREFIX:
		VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x",
			   inti->prefix.address);
		vcpu->stat.deliver_prefix_signal++;
		vcpu->arch.sie_block->prefix = inti->prefix.address;
		/* invalidate the cached prefix translation */
		vcpu->arch.sie_block->ihcpu = 0xffff;
		break;

	case KVM_S390_RESTART:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
		vcpu->stat.deliver_restart_signal++;
		rc = copy_to_guest(vcpu, offsetof(struct _lowcore,
		  restart_old_psw), &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
			offsetof(struct _lowcore, restart_psw), sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;
		break;

	case KVM_S390_PROGRAM_INT:
		VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
			   inti->pgm.code,
			   table[vcpu->arch.sie_block->ipa >> 14]);
		vcpu->stat.deliver_program_int++;
		rc = put_guest_u16(vcpu, __LC_PGM_INT_CODE, inti->pgm.code);
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u16(vcpu, __LC_PGM_ILC,
				   table[vcpu->arch.sie_block->ipa >> 14]);
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_to_guest(vcpu, __LC_PGM_OLD_PSW,
				   &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				     __LC_PGM_NEW_PSW, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;
		break;

	default:
		BUG();
	}

	if (exception) {
		VCPU_EVENT(vcpu, 1, "%s", "program exception while delivering"
			   " interrupt");
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		if (inti->type == KVM_S390_PROGRAM_INT) {
			printk(KERN_WARNING "kvm: recursive program check\n");
			BUG();
		}
	}
}
260 | |||
261 | static int __try_deliver_ckc_interrupt(struct kvm_vcpu *vcpu) | ||
262 | { | ||
263 | int rc, exception = 0; | ||
264 | |||
265 | if (psw_extint_disabled(vcpu)) | ||
266 | return 0; | ||
267 | if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul)) | ||
268 | return 0; | ||
269 | rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1004); | ||
270 | if (rc == -EFAULT) | ||
271 | exception = 1; | ||
272 | rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW, | ||
273 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | ||
274 | if (rc == -EFAULT) | ||
275 | exception = 1; | ||
276 | rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, | ||
277 | __LC_EXT_NEW_PSW, sizeof(psw_t)); | ||
278 | if (rc == -EFAULT) | ||
279 | exception = 1; | ||
280 | |||
281 | if (exception) { | ||
282 | VCPU_EVENT(vcpu, 1, "%s", "program exception while delivering" \ | ||
283 | " ckc interrupt"); | ||
284 | kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); | ||
285 | return 0; | ||
286 | } | ||
287 | |||
288 | return 1; | ||
289 | } | ||
290 | |||
291 | int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu) | ||
292 | { | ||
293 | struct local_interrupt *li = &vcpu->arch.local_int; | ||
294 | struct float_interrupt *fi = vcpu->arch.local_int.float_int; | ||
295 | struct interrupt_info *inti; | ||
296 | int rc = 0; | ||
297 | |||
298 | if (atomic_read(&li->active)) { | ||
299 | spin_lock_bh(&li->lock); | ||
300 | list_for_each_entry(inti, &li->list, list) | ||
301 | if (__interrupt_is_deliverable(vcpu, inti)) { | ||
302 | rc = 1; | ||
303 | break; | ||
304 | } | ||
305 | spin_unlock_bh(&li->lock); | ||
306 | } | ||
307 | |||
308 | if ((!rc) && atomic_read(&fi->active)) { | ||
309 | spin_lock_bh(&fi->lock); | ||
310 | list_for_each_entry(inti, &fi->list, list) | ||
311 | if (__interrupt_is_deliverable(vcpu, inti)) { | ||
312 | rc = 1; | ||
313 | break; | ||
314 | } | ||
315 | spin_unlock_bh(&fi->lock); | ||
316 | } | ||
317 | |||
318 | if ((!rc) && (vcpu->arch.sie_block->ckc < | ||
319 | get_clock() + vcpu->arch.sie_block->epoch)) { | ||
320 | if ((!psw_extint_disabled(vcpu)) && | ||
321 | (vcpu->arch.sie_block->gcr[0] & 0x800ul)) | ||
322 | rc = 1; | ||
323 | } | ||
324 | |||
325 | return rc; | ||
326 | } | ||
327 | |||
/* No arch timer to report; ckc wakeups are done in kvm_s390_handle_wait(). */
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return 0;
}
332 | |||
333 | int kvm_s390_handle_wait(struct kvm_vcpu *vcpu) | ||
334 | { | ||
335 | u64 now, sltime; | ||
336 | DECLARE_WAITQUEUE(wait, current); | ||
337 | |||
338 | vcpu->stat.exit_wait_state++; | ||
339 | if (kvm_cpu_has_interrupt(vcpu)) | ||
340 | return 0; | ||
341 | |||
342 | if (psw_interrupts_disabled(vcpu)) { | ||
343 | VCPU_EVENT(vcpu, 3, "%s", "disabled wait"); | ||
344 | __unset_cpu_idle(vcpu); | ||
345 | return -ENOTSUPP; /* disabled wait */ | ||
346 | } | ||
347 | |||
348 | if (psw_extint_disabled(vcpu) || | ||
349 | (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))) { | ||
350 | VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer"); | ||
351 | goto no_timer; | ||
352 | } | ||
353 | |||
354 | now = get_clock() + vcpu->arch.sie_block->epoch; | ||
355 | if (vcpu->arch.sie_block->ckc < now) { | ||
356 | __unset_cpu_idle(vcpu); | ||
357 | return 0; | ||
358 | } | ||
359 | |||
360 | sltime = (vcpu->arch.sie_block->ckc - now) / (0xf4240000ul / HZ) + 1; | ||
361 | |||
362 | vcpu->arch.ckc_timer.expires = jiffies + sltime; | ||
363 | |||
364 | add_timer(&vcpu->arch.ckc_timer); | ||
365 | VCPU_EVENT(vcpu, 5, "enabled wait timer:%lx jiffies", sltime); | ||
366 | no_timer: | ||
367 | spin_lock_bh(&vcpu->arch.local_int.float_int->lock); | ||
368 | spin_lock_bh(&vcpu->arch.local_int.lock); | ||
369 | __set_cpu_idle(vcpu); | ||
370 | vcpu->arch.local_int.timer_due = 0; | ||
371 | add_wait_queue(&vcpu->arch.local_int.wq, &wait); | ||
372 | while (list_empty(&vcpu->arch.local_int.list) && | ||
373 | list_empty(&vcpu->arch.local_int.float_int->list) && | ||
374 | (!vcpu->arch.local_int.timer_due) && | ||
375 | !signal_pending(current)) { | ||
376 | set_current_state(TASK_INTERRUPTIBLE); | ||
377 | spin_unlock_bh(&vcpu->arch.local_int.lock); | ||
378 | spin_unlock_bh(&vcpu->arch.local_int.float_int->lock); | ||
379 | vcpu_put(vcpu); | ||
380 | schedule(); | ||
381 | vcpu_load(vcpu); | ||
382 | spin_lock_bh(&vcpu->arch.local_int.float_int->lock); | ||
383 | spin_lock_bh(&vcpu->arch.local_int.lock); | ||
384 | } | ||
385 | __unset_cpu_idle(vcpu); | ||
386 | __set_current_state(TASK_RUNNING); | ||
387 | remove_wait_queue(&vcpu->wq, &wait); | ||
388 | spin_unlock_bh(&vcpu->arch.local_int.lock); | ||
389 | spin_unlock_bh(&vcpu->arch.local_int.float_int->lock); | ||
390 | del_timer(&vcpu->arch.ckc_timer); | ||
391 | return 0; | ||
392 | } | ||
393 | |||
/*
 * Timer callback armed by kvm_s390_handle_wait(): mark the timer as
 * due and wake the sleeping vcpu thread.
 */
void kvm_s390_idle_wakeup(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

	spin_lock_bh(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.timer_due = 1;
	if (waitqueue_active(&vcpu->arch.local_int.wq))
		wake_up_interruptible(&vcpu->arch.local_int.wq);
	spin_unlock_bh(&vcpu->arch.local_int.lock);
}
404 | |||
405 | |||
/*
 * Deliver all currently deliverable local and floating interrupts to
 * this vcpu, and re-arm intercept indicators for those that remain
 * pending.  Interrupts are delivered one at a time with the list lock
 * dropped, because delivery writes guest memory and may itself inject
 * a program check.
 */
void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
	struct local_interrupt *li = &vcpu->arch.local_int;
	struct float_interrupt *fi = vcpu->arch.local_int.float_int;
	struct interrupt_info *n, *inti = NULL;
	int deliver;

	__reset_intercept_indicators(vcpu);
	if (atomic_read(&li->active)) {
		do {
			deliver = 0;
			spin_lock_bh(&li->lock);
			list_for_each_entry_safe(inti, n, &li->list, list) {
				if (__interrupt_is_deliverable(vcpu, inti)) {
					/* unqueue, deliver outside the lock */
					list_del(&inti->list);
					deliver = 1;
					break;
				}
				__set_intercept_indicator(vcpu, inti);
			}
			if (list_empty(&li->list))
				atomic_set(&li->active, 0);
			spin_unlock_bh(&li->lock);
			if (deliver) {
				__do_deliver_interrupt(vcpu, inti);
				kfree(inti);
			}
		} while (deliver);
	}

	/* deliver an expired clock comparator interrupt, if enabled */
	if ((vcpu->arch.sie_block->ckc <
		get_clock() + vcpu->arch.sie_block->epoch))
		__try_deliver_ckc_interrupt(vcpu);

	if (atomic_read(&fi->active)) {
		do {
			deliver = 0;
			spin_lock_bh(&fi->lock);
			list_for_each_entry_safe(inti, n, &fi->list, list) {
				if (__interrupt_is_deliverable(vcpu, inti)) {
					list_del(&inti->list);
					deliver = 1;
					break;
				}
				__set_intercept_indicator(vcpu, inti);
			}
			if (list_empty(&fi->list))
				atomic_set(&fi->active, 0);
			spin_unlock_bh(&fi->lock);
			if (deliver) {
				__do_deliver_interrupt(vcpu, inti);
				kfree(inti);
			}
		} while (deliver);
	}
}
462 | |||
463 | int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code) | ||
464 | { | ||
465 | struct local_interrupt *li = &vcpu->arch.local_int; | ||
466 | struct interrupt_info *inti; | ||
467 | |||
468 | inti = kzalloc(sizeof(*inti), GFP_KERNEL); | ||
469 | if (!inti) | ||
470 | return -ENOMEM; | ||
471 | |||
472 | inti->type = KVM_S390_PROGRAM_INT;; | ||
473 | inti->pgm.code = code; | ||
474 | |||
475 | VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code); | ||
476 | spin_lock_bh(&li->lock); | ||
477 | list_add(&inti->list, &li->list); | ||
478 | atomic_set(&li->active, 1); | ||
479 | BUG_ON(waitqueue_active(&li->wq)); | ||
480 | spin_unlock_bh(&li->lock); | ||
481 | return 0; | ||
482 | } | ||
483 | |||
/*
 * Inject a floating (machine-wide) interrupt: service signal or
 * virtio notification.  The interrupt is queued on the per-vm list
 * and one vcpu is kicked - preferably an idle one, otherwise the next
 * existing vcpu in round-robin order.
 */
int kvm_s390_inject_vm(struct kvm *kvm,
		       struct kvm_s390_interrupt *s390int)
{
	struct local_interrupt *li;
	struct float_interrupt *fi;
	struct interrupt_info *inti;
	int sigcpu;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	switch (s390int->type) {
	case KVM_S390_INT_VIRTIO:
		VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%lx",
			 s390int->parm, s390int->parm64);
		inti->type = s390int->type;
		inti->ext.ext_params = s390int->parm;
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_INT_SERVICE:
		VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm);
		inti->type = s390int->type;
		inti->ext.ext_params = s390int->parm;
		break;
	case KVM_S390_PROGRAM_INT:
	case KVM_S390_SIGP_STOP:
	case KVM_S390_INT_EMERGENCY:
		/* per-vcpu types - kvm_s390_inject_vcpu() handles those */
	default:
		kfree(inti);
		return -EINVAL;
	}

	mutex_lock(&kvm->lock);
	fi = &kvm->arch.float_int;
	spin_lock_bh(&fi->lock);
	list_add_tail(&inti->list, &fi->list);
	atomic_set(&fi->active, 1);
	/* prefer an idle vcpu as the delivery target */
	sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
	if (sigcpu == KVM_MAX_VCPUS) {
		/* none idle: pick the next existing vcpu round-robin */
		do {
			sigcpu = fi->next_rr_cpu++;
			if (sigcpu == KVM_MAX_VCPUS)
				sigcpu = fi->next_rr_cpu = 0;
		} while (fi->local_int[sigcpu] == NULL);
	}
	li = fi->local_int[sigcpu];
	spin_lock_bh(&li->lock);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	if (waitqueue_active(&li->wq))
		wake_up_interruptible(&li->wq);
	spin_unlock_bh(&li->lock);
	spin_unlock_bh(&fi->lock);
	mutex_unlock(&kvm->lock);
	return 0;
}
540 | |||
541 | int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, | ||
542 | struct kvm_s390_interrupt *s390int) | ||
543 | { | ||
544 | struct local_interrupt *li; | ||
545 | struct interrupt_info *inti; | ||
546 | |||
547 | inti = kzalloc(sizeof(*inti), GFP_KERNEL); | ||
548 | if (!inti) | ||
549 | return -ENOMEM; | ||
550 | |||
551 | switch (s390int->type) { | ||
552 | case KVM_S390_PROGRAM_INT: | ||
553 | if (s390int->parm & 0xffff0000) { | ||
554 | kfree(inti); | ||
555 | return -EINVAL; | ||
556 | } | ||
557 | inti->type = s390int->type; | ||
558 | inti->pgm.code = s390int->parm; | ||
559 | VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)", | ||
560 | s390int->parm); | ||
561 | break; | ||
562 | case KVM_S390_SIGP_STOP: | ||
563 | case KVM_S390_RESTART: | ||
564 | case KVM_S390_SIGP_SET_PREFIX: | ||
565 | case KVM_S390_INT_EMERGENCY: | ||
566 | VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type); | ||
567 | inti->type = s390int->type; | ||
568 | break; | ||
569 | case KVM_S390_INT_VIRTIO: | ||
570 | case KVM_S390_INT_SERVICE: | ||
571 | default: | ||
572 | kfree(inti); | ||
573 | return -EINVAL; | ||
574 | } | ||
575 | |||
576 | mutex_lock(&vcpu->kvm->lock); | ||
577 | li = &vcpu->arch.local_int; | ||
578 | spin_lock_bh(&li->lock); | ||
579 | if (inti->type == KVM_S390_PROGRAM_INT) | ||
580 | list_add(&inti->list, &li->list); | ||
581 | else | ||
582 | list_add_tail(&inti->list, &li->list); | ||
583 | atomic_set(&li->active, 1); | ||
584 | if (inti->type == KVM_S390_SIGP_STOP) | ||
585 | li->action_bits |= ACTION_STOP_ON_STOP; | ||
586 | atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); | ||
587 | if (waitqueue_active(&li->wq)) | ||
588 | wake_up_interruptible(&vcpu->arch.local_int.wq); | ||
589 | spin_unlock_bh(&li->lock); | ||
590 | mutex_unlock(&vcpu->kvm->lock); | ||
591 | return 0; | ||
592 | } | ||
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c new file mode 100644 index 000000000000..98d1e73e01f1 --- /dev/null +++ b/arch/s390/kvm/kvm-s390.c | |||
@@ -0,0 +1,685 @@ | |||
1 | /* | ||
2 | * kvm-s390.c -- hosting zSeries kernel virtual machines | ||
3 | * | ||
4 | * Copyright IBM Corp. 2008 | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License (version 2 only) | ||
8 | * as published by the Free Software Foundation. | ||
9 | * | ||
10 | * Author(s): Carsten Otte <cotte@de.ibm.com> | ||
11 | * Christian Borntraeger <borntraeger@de.ibm.com> | ||
12 | * Heiko Carstens <heiko.carstens@de.ibm.com> | ||
13 | */ | ||
14 | |||
15 | #include <linux/compiler.h> | ||
16 | #include <linux/err.h> | ||
17 | #include <linux/fs.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <linux/kvm.h> | ||
20 | #include <linux/kvm_host.h> | ||
21 | #include <linux/module.h> | ||
22 | #include <linux/slab.h> | ||
23 | #include <linux/timer.h> | ||
24 | #include <asm/lowcore.h> | ||
25 | #include <asm/pgtable.h> | ||
26 | |||
27 | #include "kvm-s390.h" | ||
28 | #include "gaccess.h" | ||
29 | |||
/* map a debugfs statistics name to its field in struct kvm_vcpu's stat */
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

/* counters exported via the kvm debugfs interface */
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctg", VCPU_STAT(instruction_lctg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ NULL }
};
69 | |||
70 | |||
/*
 * Section: not file related.
 * SIE is available on every s390 CPU, so the hardware enable/disable
 * and processor compatibility hooks required by common KVM code are
 * no-ops here.
 */
void kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
}

void kvm_arch_hardware_disable(void *garbage)
{
	/* nothing to disable */
}

void decache_vcpus_on_cpu(int cpu)
{
	/* no per-cpu vcpu state to flush */
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
	/* all s390 processors are compatible */
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}
106 | |||
107 | /* Section: device related */ | ||
108 | long kvm_arch_dev_ioctl(struct file *filp, | ||
109 | unsigned int ioctl, unsigned long arg) | ||
110 | { | ||
111 | if (ioctl == KVM_S390_ENABLE_SIE) | ||
112 | return s390_enable_sie(); | ||
113 | return -EINVAL; | ||
114 | } | ||
115 | |||
/* No optional KVM capabilities are advertised yet. */
int kvm_dev_ioctl_check_extension(long ext)
{
	return 0;
}
120 | |||
/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 * Not implemented on s390 yet; reports success without filling in any
 * dirty bits.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}
130 | |||
131 | long kvm_arch_vm_ioctl(struct file *filp, | ||
132 | unsigned int ioctl, unsigned long arg) | ||
133 | { | ||
134 | struct kvm *kvm = filp->private_data; | ||
135 | void __user *argp = (void __user *)arg; | ||
136 | int r; | ||
137 | |||
138 | switch (ioctl) { | ||
139 | case KVM_S390_INTERRUPT: { | ||
140 | struct kvm_s390_interrupt s390int; | ||
141 | |||
142 | r = -EFAULT; | ||
143 | if (copy_from_user(&s390int, argp, sizeof(s390int))) | ||
144 | break; | ||
145 | r = kvm_s390_inject_vm(kvm, &s390int); | ||
146 | break; | ||
147 | } | ||
148 | default: | ||
149 | r = -EINVAL; | ||
150 | } | ||
151 | |||
152 | return r; | ||
153 | } | ||
154 | |||
/*
 * Create a new virtual machine: enable SIE for the current mm,
 * allocate the system control area (SCA) and a debug feature area,
 * and initialize the floating interrupt list.  Returns an ERR_PTR on
 * failure; partial allocations are undone via the goto chain.
 */
struct kvm *kvm_arch_create_vm(void)
{
	struct kvm *kvm;
	int rc;
	char debug_name[16];

	/* switch the mm to a SIE-compatible page table layout */
	rc = s390_enable_sie();
	if (rc)
		goto out_nokvm;

	rc = -ENOMEM;
	kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
	if (!kvm)
		goto out_nokvm;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_nosca;

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	try_module_get(THIS_MODULE);

	return kvm;
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_nosca:
	kfree(kvm);
out_nokvm:
	return ERR_PTR(rc);
}
196 | |||
/* Tear down a vm: release the debug feature, the SCA page, the kvm. */
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	debug_unregister(kvm->arch.dbf);
	free_page((unsigned long)(kvm->arch.sca));
	kfree(kvm);
	module_put(THIS_MODULE);
}
204 | |||
/* Section: vcpu related */
/* All per-vcpu initialization happens in kvm_arch_vcpu_create(). */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}
210 | |||
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but doesn't call it */
	BUG();
}
216 | |||
/*
 * Scheduled in on a host cpu: switch floating point and access
 * registers from host to guest values.  A pending signal requests an
 * immediate stop intercept so we leave SIE quickly.
 */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	/* sanitize the guest fpc before loading it into hardware */
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->arch.guest_acrs);

	if (signal_pending(current))
		atomic_set_mask(CPUSTAT_STOP_INT,
			&vcpu->arch.sie_block->cpuflags);
}
229 | |||
/*
 * Scheduled out: save the guest's floating point and access registers
 * and restore the host's.
 */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->arch.guest_acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}
237 | |||
/*
 * Initial CPU reset as defined in the Principles of Operation, except
 * that the vcpu stays in z/Architecture mode instead of switching to
 * ESA/390.
 */
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	vcpu->arch.sie_block->prefix = 0UL;
	vcpu->arch.sie_block->ihcpu = 0xffff;	/* invalidate cached prefix */
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	/* architected reset values for cr0 and cr14 */
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
}
255 | |||
/* One-time setup of the SIE control block for a freshly created vcpu. */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	/* run the guest in z/Architecture mode */
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
	vcpu->arch.sie_block->gmslm = 0xffffffffffUL;
	vcpu->arch.sie_block->gmsor = 0x000000000000;
	vcpu->arch.sie_block->ecb = 2;
	vcpu->arch.sie_block->eca = 0xC1002001U;
	/* timer used by kvm_s390_handle_wait() for clock comparator wakeups */
	setup_timer(&vcpu->arch.ckc_timer, kvm_s390_idle_wakeup,
		 (unsigned long) vcpu);
	get_cpu_id(&vcpu->arch.cpu_id);
	/* tag the cpu id version so the guest can tell it runs under KVM */
	vcpu->arch.cpu_id.version = 0xfe;
	return 0;
}
269 | |||
270 | struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, | ||
271 | unsigned int id) | ||
272 | { | ||
273 | struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL); | ||
274 | int rc = -ENOMEM; | ||
275 | |||
276 | if (!vcpu) | ||
277 | goto out_nomem; | ||
278 | |||
279 | vcpu->arch.sie_block = (struct sie_block *) get_zeroed_page(GFP_KERNEL); | ||
280 | |||
281 | if (!vcpu->arch.sie_block) | ||
282 | goto out_free_cpu; | ||
283 | |||
284 | vcpu->arch.sie_block->icpua = id; | ||
285 | BUG_ON(!kvm->arch.sca); | ||
286 | BUG_ON(kvm->arch.sca->cpu[id].sda); | ||
287 | kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block; | ||
288 | vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32); | ||
289 | vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca; | ||
290 | |||
291 | spin_lock_init(&vcpu->arch.local_int.lock); | ||
292 | INIT_LIST_HEAD(&vcpu->arch.local_int.list); | ||
293 | vcpu->arch.local_int.float_int = &kvm->arch.float_int; | ||
294 | spin_lock_bh(&kvm->arch.float_int.lock); | ||
295 | kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int; | ||
296 | init_waitqueue_head(&vcpu->arch.local_int.wq); | ||
297 | vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags; | ||
298 | spin_unlock_bh(&kvm->arch.float_int.lock); | ||
299 | |||
300 | rc = kvm_vcpu_init(vcpu, kvm, id); | ||
301 | if (rc) | ||
302 | goto out_free_cpu; | ||
303 | VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu, | ||
304 | vcpu->arch.sie_block); | ||
305 | |||
306 | try_module_get(THIS_MODULE); | ||
307 | |||
308 | return vcpu; | ||
309 | out_free_cpu: | ||
310 | kfree(vcpu); | ||
311 | out_nomem: | ||
312 | return ERR_PTR(rc); | ||
313 | } | ||
314 | |||
/* Tear down a vcpu: free its SIE block page and the vcpu struct. */
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "destroy cpu");
	/* NOTE(review): the SCA sda slot and float_int.local_int[] still
	 * reference this vcpu here — presumably cleared at VM teardown;
	 * verify there is no window where they dangle. */
	free_page((unsigned long)(vcpu->arch.sie_block));
	kfree(vcpu);
	module_put(THIS_MODULE);
}
322 | |||
/* Required by common kvm code but unused on s390; must never run. */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}
329 | |||
/* KVM_S390_INITIAL_RESET: reset the vcpu to its initial state. */
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_s390_vcpu_initial_reset(vcpu);
	vcpu_put(vcpu);
	return 0;
}
337 | |||
/* Copy all 16 general purpose registers from userspace into the vcpu. */
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}
345 | |||
/* Copy all 16 general purpose registers out of the vcpu for userspace. */
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}
353 | |||
/* Set access registers and control registers from userspace. */
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	vcpu_put(vcpu);
	return 0;
}
363 | |||
/* Read access registers and control registers for userspace. */
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);
	memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	vcpu_put(vcpu);
	return 0;
}
373 | |||
/* Set floating point registers and FP control register from userspace. */
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	vcpu_put(vcpu);
	return 0;
}
382 | |||
/* Read floating point registers and FP control register for userspace. */
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_load(vcpu);
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	vcpu_put(vcpu);
	return 0;
}
391 | |||
/*
 * KVM_S390_SET_INITIAL_PSW: install the initial guest PSW.
 * Refused with -EBUSY while the vcpu is running.
 */
static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	vcpu_load(vcpu);
	if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
		rc = -EBUSY;
	else
		vcpu->arch.sie_block->gpsw = psw;
	vcpu_put(vcpu);
	return rc;
}
404 | |||
/* Address translation ioctl: not supported on s390 (yet). */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}
410 | |||
/* Guest debugging ioctl: not supported on s390 (yet). */
int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
				    struct kvm_debug_guest *dbg)
{
	return -EINVAL; /* not implemented yet */
}
416 | |||
/* Multiprocessing-state query: not supported on s390 (yet). */
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}
422 | |||
/* Multiprocessing-state update: not supported on s390 (yet). */
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}
428 | |||
/*
 * Execute one SIE entry/exit cycle for this vcpu.
 * Guest gprs 0-13 travel via the save area passed to sie64a(); gprs
 * 14/15 are exchanged through the gg14 field of the SIE block (16
 * bytes = two 8-byte registers).
 */
static void __vcpu_run(struct kvm_vcpu *vcpu)
{
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

	if (need_resched())
		schedule();

	vcpu->arch.sie_block->icptcode = 0;
	/* kvm_guest_enter/exit must run with interrupts disabled */
	local_irq_disable();
	kvm_guest_enter();
	local_irq_enable();
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs);
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	local_irq_disable();
	kvm_guest_exit();
	local_irq_enable();

	/* fetch back gprs 14/15 possibly modified by the guest */
	memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
}
451 | |||
/*
 * KVM_RUN: enter the guest and loop until an intercept needs userspace
 * attention or a signal is pending.  Returns 0 with kvm_run filled in,
 * or a negative error.
 */
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	vcpu_load(vcpu);

	/* run with the vcpu's own signal mask, if one was set */
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	/* resume state handed back by userspace on re-entry */
	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
		/* userspace may have modified the guest PSW */
		vcpu->arch.sie_block->gpsw.mask = kvm_run->s390_sieic.mask;
		vcpu->arch.sie_block->gpsw.addr = kvm_run->s390_sieic.addr;
		break;
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_S390_RESET:
		break;
	default:
		BUG();
	}

	might_sleep();

	/* main run loop: deliver interrupts, enter SIE, handle intercept */
	do {
		kvm_s390_deliver_pending_interrupts(vcpu);
		__vcpu_run(vcpu);
		rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (signal_pending(current) && !rc)
		rc = -EINTR;

	if (rc == -ENOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.mask = vcpu->arch.sie_block->gpsw.mask;
		kvm_run->s390_sieic.addr = vcpu->arch.sie_block->gpsw.addr;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu_put(vcpu);

	vcpu->stat.exit_userspace++;
	return rc;
}
514 | |||
515 | static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from, | ||
516 | unsigned long n, int prefix) | ||
517 | { | ||
518 | if (prefix) | ||
519 | return copy_to_guest(vcpu, guestdest, from, n); | ||
520 | else | ||
521 | return copy_to_guest_absolute(vcpu, guestdest, from, n); | ||
522 | } | ||
523 | |||
524 | /* | ||
525 | * store status at address | ||
 * we have two special cases:
527 | * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit | ||
528 | * KVM_S390_STORE_STATUS_PREFIXED: -> prefix | ||
529 | */ | ||
/*
 * Store the vcpu's architected status (registers, PSW, timers) into the
 * guest save area at @addr.  The two magic address values select the
 * fixed save area with absolute resp. prefixed addressing; in both
 * cases an "arch mode" byte is written first (offset 163 in lowcore —
 * NOTE(review): presumably the architected-mode flag; confirm).
 * Returns 0 or -EFAULT.
 */
int __kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	const unsigned char archmode = 1;
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	/* 16 fp registers, 8 bytes each */
	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	/* 16 general purpose registers, 8 bytes each */
	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, gp_regs),
			vcpu->arch.guest_gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area_s390x, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	/* 16 access registers, 4 bytes each */
	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, acc_regs),
			&vcpu->arch.guest_acrs, 64, prefix))
		return -EFAULT;

	/* 16 control registers, 8 bytes each */
	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area_s390x, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}
591 | |||
/* vcpu_load/put wrapper around __kvm_s390_vcpu_store_status(). */
static int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	int rc;

	vcpu_load(vcpu);
	rc = __kvm_s390_vcpu_store_status(vcpu, addr);
	vcpu_put(vcpu);
	return rc;
}
601 | |||
602 | long kvm_arch_vcpu_ioctl(struct file *filp, | ||
603 | unsigned int ioctl, unsigned long arg) | ||
604 | { | ||
605 | struct kvm_vcpu *vcpu = filp->private_data; | ||
606 | void __user *argp = (void __user *)arg; | ||
607 | |||
608 | switch (ioctl) { | ||
609 | case KVM_S390_INTERRUPT: { | ||
610 | struct kvm_s390_interrupt s390int; | ||
611 | |||
612 | if (copy_from_user(&s390int, argp, sizeof(s390int))) | ||
613 | return -EFAULT; | ||
614 | return kvm_s390_inject_vcpu(vcpu, &s390int); | ||
615 | } | ||
616 | case KVM_S390_STORE_STATUS: | ||
617 | return kvm_s390_vcpu_store_status(vcpu, arg); | ||
618 | case KVM_S390_SET_INITIAL_PSW: { | ||
619 | psw_t psw; | ||
620 | |||
621 | if (copy_from_user(&psw, argp, sizeof(psw))) | ||
622 | return -EFAULT; | ||
623 | return kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw); | ||
624 | } | ||
625 | case KVM_S390_INITIAL_RESET: | ||
626 | return kvm_arch_vcpu_ioctl_initial_reset(vcpu); | ||
627 | default: | ||
628 | ; | ||
629 | } | ||
630 | return -EINVAL; | ||
631 | } | ||
632 | |||
633 | /* Section: memory related */ | ||
634 | int kvm_arch_set_memory_region(struct kvm *kvm, | ||
635 | struct kvm_userspace_memory_region *mem, | ||
636 | struct kvm_memory_slot old, | ||
637 | int user_alloc) | ||
638 | { | ||
639 | /* A few sanity checks. We can have exactly one memory slot which has | ||
640 | to start at guest virtual zero and which has to be located at a | ||
641 | page boundary in userland and which has to end at a page boundary. | ||
642 | The memory in userland is ok to be fragmented into various different | ||
643 | vmas. It is okay to mmap() and munmap() stuff in this slot after | ||
644 | doing this call at any time */ | ||
645 | |||
646 | if (mem->slot) | ||
647 | return -EINVAL; | ||
648 | |||
649 | if (mem->guest_phys_addr) | ||
650 | return -EINVAL; | ||
651 | |||
652 | if (mem->userspace_addr & (PAGE_SIZE - 1)) | ||
653 | return -EINVAL; | ||
654 | |||
655 | if (mem->memory_size & (PAGE_SIZE - 1)) | ||
656 | return -EINVAL; | ||
657 | |||
658 | kvm->arch.guest_origin = mem->userspace_addr; | ||
659 | kvm->arch.guest_memsize = mem->memory_size; | ||
660 | |||
661 | /* FIXME: we do want to interrupt running CPUs and update their memory | ||
662 | configuration now to avoid race conditions. But hey, changing the | ||
663 | memory layout while virtual CPUs are running is usually bad | ||
664 | programming practice. */ | ||
665 | |||
666 | return 0; | ||
667 | } | ||
668 | |||
/* s390 has no memory aliases: the gfn maps to itself. */
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	return gfn;
}
673 | |||
/* Module entry point: register with the generic kvm layer. */
static int __init kvm_s390_init(void)
{
	return kvm_init(NULL, sizeof(struct kvm_vcpu), THIS_MODULE);
}
678 | |||
/* Module exit point: unregister from the generic kvm layer. */
static void __exit kvm_s390_exit(void)
{
	kvm_exit();
}
683 | |||
684 | module_init(kvm_s390_init); | ||
685 | module_exit(kvm_s390_exit); | ||
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h new file mode 100644 index 000000000000..3893cf12eacf --- /dev/null +++ b/arch/s390/kvm/kvm-s390.h | |||
@@ -0,0 +1,64 @@ | |||
1 | /* | ||
2 | * kvm_s390.h - definition for kvm on s390 | ||
3 | * | ||
4 | * Copyright IBM Corp. 2008 | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License (version 2 only) | ||
8 | * as published by the Free Software Foundation. | ||
9 | * | ||
10 | * Author(s): Carsten Otte <cotte@de.ibm.com> | ||
11 | * Christian Borntraeger <borntraeger@de.ibm.com> | ||
12 | */ | ||
13 | |||
#ifndef ARCH_S390_KVM_S390_H
#define ARCH_S390_KVM_S390_H

#include <linux/kvm.h>
#include <linux/kvm_host.h>

/* common signature of all in-kernel intercept handlers */
typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu);

int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu);

/* log a VM-scoped event to the per-VM s390 debug feature */
#define VM_EVENT(d_kvm, d_loglevel, d_string, d_args...)\
do { \
	debug_sprintf_event(d_kvm->arch.dbf, d_loglevel, d_string "\n", \
	  d_args); \
} while (0)

/* log a vcpu-scoped event, prefixed with cpu id and current guest PSW */
#define VCPU_EVENT(d_vcpu, d_loglevel, d_string, d_args...)\
do { \
	debug_sprintf_event(d_vcpu->kvm->arch.dbf, d_loglevel, \
	  "%02d[%016lx-%016lx]: " d_string "\n", d_vcpu->vcpu_id, \
	  d_vcpu->arch.sie_block->gpsw.mask, d_vcpu->arch.sie_block->gpsw.addr,\
	  d_args); \
} while (0)

/* true if the vcpu has a stop interrupt pending in its cpuflags */
static inline int __cpu_is_stopped(struct kvm_vcpu *vcpu)
{
	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOP_INT;
}

/* implemented in interrupt.c */
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu);
void kvm_s390_idle_wakeup(unsigned long data);
void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu);
int kvm_s390_inject_vm(struct kvm *kvm,
		struct kvm_s390_interrupt *s390int);
int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
		struct kvm_s390_interrupt *s390int);
int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code);

/* implemented in priv.c */
int kvm_s390_handle_priv(struct kvm_vcpu *vcpu);

/* implemented in sigp.c */
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);

/* implemented in kvm-s390.c */
int __kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu,
				 unsigned long addr);
/* implemented in diag.c */
int kvm_s390_handle_diag(struct kvm_vcpu *vcpu);

#endif
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c new file mode 100644 index 000000000000..1465946325c5 --- /dev/null +++ b/arch/s390/kvm/priv.c | |||
@@ -0,0 +1,323 @@ | |||
1 | /* | ||
2 | * priv.c - handling privileged instructions | ||
3 | * | ||
4 | * Copyright IBM Corp. 2008 | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License (version 2 only) | ||
8 | * as published by the Free Software Foundation. | ||
9 | * | ||
10 | * Author(s): Carsten Otte <cotte@de.ibm.com> | ||
11 | * Christian Borntraeger <borntraeger@de.ibm.com> | ||
12 | */ | ||
13 | |||
14 | #include <linux/kvm.h> | ||
15 | #include <linux/errno.h> | ||
16 | #include <asm/current.h> | ||
17 | #include <asm/debug.h> | ||
18 | #include <asm/ebcdic.h> | ||
19 | #include <asm/sysinfo.h> | ||
20 | #include "gaccess.h" | ||
21 | #include "kvm-s390.h" | ||
22 | |||
/*
 * SPX — set the guest prefix register from the word at the second
 * operand.  Injects a program interrupt on bad alignment or an
 * inaccessible operand/prefix page; always returns 0 (handled).
 */
static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
	/* decode base register and displacement from the instruction */
	int base2 = vcpu->arch.sie_block->ipb >> 28;
	int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
	u64 operand2;
	u32 address = 0;
	u8 tmp;

	vcpu->stat.instruction_spx++;

	operand2 = disp2;
	if (base2)
		operand2 += vcpu->arch.guest_gprs[base2];

	/* must be word boundary */
	if (operand2 & 3) {
		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		goto out;
	}

	/* get the value */
	if (get_guest_u32(vcpu, operand2, &address)) {
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out;
	}

	/* keep only the prefix bits (8k-aligned, below 2G) */
	address = address & 0x7fffe000u;

	/* make sure that the new value is valid memory */
	if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
	   (copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1))) {
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out;
	}

	vcpu->arch.sie_block->prefix = address;
	/* invalidate the host's cached copy of the prefix */
	vcpu->arch.sie_block->ihcpu = 0xffff;

	VCPU_EVENT(vcpu, 5, "setting prefix to %x", address);
out:
	return 0;
}
65 | |||
/*
 * STPX — store the current guest prefix register at the second operand.
 * Injects a program interrupt on bad alignment or an inaccessible
 * destination; always returns 0 (handled).
 */
static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
	int base2 = vcpu->arch.sie_block->ipb >> 28;
	int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
	u64 operand2;
	u32 address;

	vcpu->stat.instruction_stpx++;
	operand2 = disp2;
	if (base2)
		operand2 += vcpu->arch.guest_gprs[base2];

	/* must be word boundary */
	if (operand2 & 3) {
		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		goto out;
	}

	address = vcpu->arch.sie_block->prefix;
	address = address & 0x7fffe000u;

	/* store the value */
	if (put_guest_u32(vcpu, operand2, address)) {
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out;
	}

	VCPU_EVENT(vcpu, 5, "storing prefix to %x", address);
out:
	return 0;
}
97 | |||
/*
 * STAP — store the cpu address (here: the vcpu id) at the halfword
 * designated by the second operand.  Injects a program interrupt on
 * bad alignment or an inaccessible destination; always returns 0.
 */
static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
	int base2 = vcpu->arch.sie_block->ipb >> 28;
	int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
	u64 useraddr;
	int rc;

	vcpu->stat.instruction_stap++;
	useraddr = disp2;
	if (base2)
		useraddr += vcpu->arch.guest_gprs[base2];

	/* must be halfword aligned */
	if (useraddr & 1) {
		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		goto out;
	}

	rc = put_guest_u16(vcpu, useraddr, vcpu->vcpu_id);
	if (rc == -EFAULT) {
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out;
	}

	VCPU_EVENT(vcpu, 5, "storing cpu address to %lx", useraddr);
out:
	return 0;
}
125 | |||
/*
 * Storage key instruction intercepted: rewind the guest PSW by the
 * instruction length (4 bytes) so the guest re-executes it — presumably
 * after the host has faulted in / set up the page; confirm against the
 * intercept path.
 */
static int handle_skey(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_storage_key++;
	vcpu->arch.sie_block->gpsw.addr -= 4;
	VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
	return 0;
}
133 | |||
134 | static int handle_stsch(struct kvm_vcpu *vcpu) | ||
135 | { | ||
136 | vcpu->stat.instruction_stsch++; | ||
137 | VCPU_EVENT(vcpu, 4, "%s", "store subchannel - CC3"); | ||
138 | /* condition code 3 */ | ||
139 | vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44); | ||
140 | vcpu->arch.sie_block->gpsw.mask |= (3 & 3ul) << 44; | ||
141 | return 0; | ||
142 | } | ||
143 | |||
144 | static int handle_chsc(struct kvm_vcpu *vcpu) | ||
145 | { | ||
146 | vcpu->stat.instruction_chsc++; | ||
147 | VCPU_EVENT(vcpu, 4, "%s", "channel subsystem call - CC3"); | ||
148 | /* condition code 3 */ | ||
149 | vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44); | ||
150 | vcpu->arch.sie_block->gpsw.mask |= (3 & 3ul) << 44; | ||
151 | return 0; | ||
152 | } | ||
153 | |||
/*
 * Execute STFL on the host and return the facility list word from
 * lowcore.  The exception table entry makes a faulting/unsupported
 * STFL fall through (leaving the previous lowcore value).
 */
static unsigned int kvm_stfl(void)
{
	asm volatile(
		"	.insn	s,0xb2b10000,0(0)\n" /* stfl */
		"0:\n"
		EX_TABLE(0b, 0b));
	return S390_lowcore.stfl_fac_list;
}
162 | |||
/*
 * STFL — store the host facility list (with the STFLE facility bit
 * masked out) into the guest's lowcore.  Injects an addressing
 * exception if the lowcore store faults; always returns 0.
 */
static int handle_stfl(struct kvm_vcpu *vcpu)
{
	unsigned int facility_list = kvm_stfl();
	int rc;

	vcpu->stat.instruction_stfl++;
	facility_list &= ~(1UL<<24); /* no stfle */

	rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list),
			   &facility_list, sizeof(facility_list));
	if (rc == -EFAULT)
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	else
		VCPU_EVENT(vcpu, 5, "store facility list value %x",
			   facility_list);
	return 0;
}
180 | |||
/*
 * STIDP — store the CPU identification doubleword at the second
 * operand.  Injects a program interrupt on bad alignment or an
 * inaccessible destination; always returns 0.
 */
static int handle_stidp(struct kvm_vcpu *vcpu)
{
	int base2 = vcpu->arch.sie_block->ipb >> 28;
	int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
	u64 operand2;
	int rc;

	vcpu->stat.instruction_stidp++;
	operand2 = disp2;
	if (base2)
		operand2 += vcpu->arch.guest_gprs[base2];

	/* must be doubleword aligned */
	if (operand2 & 7) {
		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		goto out;
	}

	rc = put_guest_u64(vcpu, operand2, vcpu->arch.stidp_data);
	if (rc == -EFAULT) {
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out;
	}

	VCPU_EVENT(vcpu, 5, "%s", "store cpu id");
out:
	return 0;
}
208 | |||
/*
 * Build the STSI 3.2.2 (VM-level) response: shift any entries reported
 * by an underlying hypervisor down by one and insert KVM itself as
 * level-3 hypervisor entry 0 with the current vcpu count.
 */
static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
	struct float_interrupt *fi = &vcpu->kvm->arch.float_int;
	int cpus = 0;
	int n;

	/* count the vcpus registered with the floating interrupt struct */
	spin_lock_bh(&fi->lock);
	for (n = 0; n < KVM_MAX_VCPUS; n++)
		if (fi->local_int[n])
			cpus++;
	spin_unlock_bh(&fi->lock);

	/* deal with other level 3 hypervisors */
	if (stsi(mem, 3, 2, 2) == -ENOSYS)
		mem->count = 0;
	if (mem->count < 8)
		mem->count++;
	/* make room at index 0 for our own entry */
	for (n = mem->count - 1; n > 0 ; n--)
		memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

	mem->vm[0].cpus_total = cpus;
	mem->vm[0].cpus_configured = cpus;
	mem->vm[0].cpus_standby = 0;
	mem->vm[0].cpus_reserved = 0;
	mem->vm[0].caf = 1000;	/* capability adjustment factor: 100% */
	/* name and control program id must be EBCDIC */
	memcpy(mem->vm[0].name, "KVMguest", 8);
	ASCEBC(mem->vm[0].name, 8);
	memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
	ASCEBC(mem->vm[0].cpi, 16);
}
239 | |||
/*
 * STSI — store system information.  Function code 0 just reports the
 * maximum supported level (3); codes 1 and 2 are passed through to the
 * host, code 3 (selector 2.2) is synthesized for KVM.  The response
 * page is copied to the guest; failures yield condition code 3.
 * Always returns 0 (handled) except for the specification exception.
 */
static int handle_stsi(struct kvm_vcpu *vcpu)
{
	/* function code and selectors from guest gpr 0/1 */
	int fc = (vcpu->arch.guest_gprs[0] & 0xf0000000) >> 28;
	int sel1 = vcpu->arch.guest_gprs[0] & 0xff;
	int sel2 = vcpu->arch.guest_gprs[1] & 0xffff;
	int base2 = vcpu->arch.sie_block->ipb >> 28;
	int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
	u64 operand2;
	unsigned long mem;

	vcpu->stat.instruction_stsi++;
	VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);

	operand2 = disp2;
	if (base2)
		operand2 += vcpu->arch.guest_gprs[base2];

	/* destination must be page aligned unless fc is 0 */
	if (operand2 & 0xfff && fc > 0)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (fc) {
	case 0:
		/* report highest supported function code (3), cc 0 */
		vcpu->arch.guest_gprs[0] = 3 << 28;
		vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
		return 0;
	case 1: /* same handling for 1 and 2 */
	case 2:
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_fail;
		if (stsi((void *) mem, fc, sel1, sel2) == -ENOSYS)
			goto out_mem;
		break;
	case 3:
		if (sel1 != 2 || sel2 != 2)
			goto out_fail;
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_fail;
		handle_stsi_3_2_2(vcpu, (void *) mem);
		break;
	default:
		goto out_fail;
	}

	if (copy_to_guest_absolute(vcpu, operand2, (void *) mem, PAGE_SIZE)) {
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out_mem;
	}
	free_page(mem);
	/* success: condition code 0, gpr 0 cleared */
	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	vcpu->arch.guest_gprs[0] = 0;
	return 0;
out_mem:
	free_page(mem);
out_fail:
	/* condition code 3 */
	vcpu->arch.sie_block->gpsw.mask |= 3ul << 44;
	return 0;
}
300 | |||
/* dispatch table indexed by the low byte of the b2xx opcode (ipa) */
static intercept_handler_t priv_handlers[256] = {
	[0x02] = handle_stidp,
	[0x10] = handle_set_prefix,
	[0x11] = handle_store_prefix,
	[0x12] = handle_store_cpu_address,
	[0x29] = handle_skey,	/* iske */
	[0x2a] = handle_skey,	/* rrbe */
	[0x2b] = handle_skey,	/* sske */
	[0x34] = handle_stsch,
	[0x5f] = handle_chsc,
	[0x7d] = handle_stsi,
	[0xb1] = handle_stfl,
};
314 | |||
315 | int kvm_s390_handle_priv(struct kvm_vcpu *vcpu) | ||
316 | { | ||
317 | intercept_handler_t handler; | ||
318 | |||
319 | handler = priv_handlers[vcpu->arch.sie_block->ipa & 0x00ff]; | ||
320 | if (handler) | ||
321 | return handler(vcpu); | ||
322 | return -ENOTSUPP; | ||
323 | } | ||
diff --git a/arch/s390/kvm/sie64a.S b/arch/s390/kvm/sie64a.S new file mode 100644 index 000000000000..934fd6a885f6 --- /dev/null +++ b/arch/s390/kvm/sie64a.S | |||
@@ -0,0 +1,47 @@ | |||
1 | /* | ||
2 | * sie64a.S - low level sie call | ||
3 | * | ||
4 | * Copyright IBM Corp. 2008 | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License (version 2 only) | ||
8 | * as published by the Free Software Foundation. | ||
9 | * | ||
10 | * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> | ||
11 | */ | ||
12 | |||
13 | #include <linux/errno.h> | ||
14 | #include <asm/asm-offsets.h> | ||
15 | |||
SP_R5 = 5 * 8	# offset into stackframe
SP_R6 = 6 * 8

/*
 * sie64a calling convention:
 * %r2 pointer to sie control block
 * %r3 guest register save area
 *
 * Returns 0 on a normal SIE exit, -EFAULT if the SIE instruction
 * itself faulted (via the exception table entry below).
 */
	.globl sie64a
sie64a:
	lgr	%r5,%r3
	stmg	%r5,%r14,SP_R5(%r15)	# save register on entry
	lgr	%r14,%r2		# pointer to sie control block
	lmg	%r0,%r13,0(%r3)		# load guest gprs 0-13
sie_inst:
	sie	0(%r14)
	lg	%r14,SP_R5(%r15)	# reload save area pointer
	stmg	%r0,%r13,0(%r14)	# save guest gprs 0-13
	lghi	%r2,0			# return value 0
	lmg	%r6,%r14,SP_R6(%r15)	# restore caller's registers
	br	%r14

sie_err:				# reached via exception table fixup
	lg	%r14,SP_R5(%r15)
	stmg	%r0,%r13,0(%r14)	# save guest gprs 0-13
	lghi	%r2,-EFAULT		# return value -EFAULT
	lmg	%r6,%r14,SP_R6(%r15)
	br	%r14

	.section __ex_table,"a"
	.quad	sie_inst,sie_err	# fault in sie -> sie_err
	.previous
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c new file mode 100644 index 000000000000..0a236acfb5f6 --- /dev/null +++ b/arch/s390/kvm/sigp.c | |||
@@ -0,0 +1,288 @@ | |||
1 | /* | ||
2 | * sigp.c - handling interprocessor communication | ||
3 | * | ||
4 | * Copyright IBM Corp. 2008 | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License (version 2 only) | ||
8 | * as published by the Free Software Foundation. | ||
9 | * | ||
10 | * Author(s): Carsten Otte <cotte@de.ibm.com> | ||
11 | * Christian Borntraeger <borntraeger@de.ibm.com> | ||
12 | */ | ||
13 | |||
14 | #include <linux/kvm.h> | ||
15 | #include <linux/kvm_host.h> | ||
16 | #include "gaccess.h" | ||
17 | #include "kvm-s390.h" | ||
18 | |||
19 | /* sigp order codes */ | ||
20 | #define SIGP_SENSE 0x01 | ||
21 | #define SIGP_EXTERNAL_CALL 0x02 | ||
22 | #define SIGP_EMERGENCY 0x03 | ||
23 | #define SIGP_START 0x04 | ||
24 | #define SIGP_STOP 0x05 | ||
25 | #define SIGP_RESTART 0x06 | ||
26 | #define SIGP_STOP_STORE_STATUS 0x09 | ||
27 | #define SIGP_INITIAL_CPU_RESET 0x0b | ||
28 | #define SIGP_CPU_RESET 0x0c | ||
29 | #define SIGP_SET_PREFIX 0x0d | ||
30 | #define SIGP_STORE_STATUS_ADDR 0x0e | ||
31 | #define SIGP_SET_ARCH 0x12 | ||
32 | |||
33 | /* cpu status bits */ | ||
34 | #define SIGP_STAT_EQUIPMENT_CHECK 0x80000000UL | ||
35 | #define SIGP_STAT_INCORRECT_STATE 0x00000200UL | ||
36 | #define SIGP_STAT_INVALID_PARAMETER 0x00000100UL | ||
37 | #define SIGP_STAT_EXT_CALL_PENDING 0x00000080UL | ||
38 | #define SIGP_STAT_STOPPED 0x00000040UL | ||
39 | #define SIGP_STAT_OPERATOR_INTERV 0x00000020UL | ||
40 | #define SIGP_STAT_CHECK_STOP 0x00000010UL | ||
41 | #define SIGP_STAT_INOPERATIVE 0x00000004UL | ||
42 | #define SIGP_STAT_INVALID_ORDER 0x00000002UL | ||
43 | #define SIGP_STAT_RECEIVER_CHECK 0x00000001UL | ||
44 | |||
45 | |||
46 | static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr, u64 *reg) | ||
47 | { | ||
48 | struct float_interrupt *fi = &vcpu->kvm->arch.float_int; | ||
49 | int rc; | ||
50 | |||
51 | if (cpu_addr >= KVM_MAX_VCPUS) | ||
52 | return 3; /* not operational */ | ||
53 | |||
54 | spin_lock_bh(&fi->lock); | ||
55 | if (fi->local_int[cpu_addr] == NULL) | ||
56 | rc = 3; /* not operational */ | ||
57 | else if (atomic_read(fi->local_int[cpu_addr]->cpuflags) | ||
58 | & CPUSTAT_RUNNING) { | ||
59 | *reg &= 0xffffffff00000000UL; | ||
60 | rc = 1; /* status stored */ | ||
61 | } else { | ||
62 | *reg &= 0xffffffff00000000UL; | ||
63 | *reg |= SIGP_STAT_STOPPED; | ||
64 | rc = 1; /* status stored */ | ||
65 | } | ||
66 | spin_unlock_bh(&fi->lock); | ||
67 | |||
68 | VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", cpu_addr, rc); | ||
69 | return rc; | ||
70 | } | ||
71 | |||
72 | static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr) | ||
73 | { | ||
74 | struct float_interrupt *fi = &vcpu->kvm->arch.float_int; | ||
75 | struct local_interrupt *li; | ||
76 | struct interrupt_info *inti; | ||
77 | int rc; | ||
78 | |||
79 | if (cpu_addr >= KVM_MAX_VCPUS) | ||
80 | return 3; /* not operational */ | ||
81 | |||
82 | inti = kzalloc(sizeof(*inti), GFP_KERNEL); | ||
83 | if (!inti) | ||
84 | return -ENOMEM; | ||
85 | |||
86 | inti->type = KVM_S390_INT_EMERGENCY; | ||
87 | |||
88 | spin_lock_bh(&fi->lock); | ||
89 | li = fi->local_int[cpu_addr]; | ||
90 | if (li == NULL) { | ||
91 | rc = 3; /* not operational */ | ||
92 | kfree(inti); | ||
93 | goto unlock; | ||
94 | } | ||
95 | spin_lock_bh(&li->lock); | ||
96 | list_add_tail(&inti->list, &li->list); | ||
97 | atomic_set(&li->active, 1); | ||
98 | atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); | ||
99 | if (waitqueue_active(&li->wq)) | ||
100 | wake_up_interruptible(&li->wq); | ||
101 | spin_unlock_bh(&li->lock); | ||
102 | rc = 0; /* order accepted */ | ||
103 | unlock: | ||
104 | spin_unlock_bh(&fi->lock); | ||
105 | VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr); | ||
106 | return rc; | ||
107 | } | ||
108 | |||
109 | static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int store) | ||
110 | { | ||
111 | struct float_interrupt *fi = &vcpu->kvm->arch.float_int; | ||
112 | struct local_interrupt *li; | ||
113 | struct interrupt_info *inti; | ||
114 | int rc; | ||
115 | |||
116 | if (cpu_addr >= KVM_MAX_VCPUS) | ||
117 | return 3; /* not operational */ | ||
118 | |||
119 | inti = kzalloc(sizeof(*inti), GFP_KERNEL); | ||
120 | if (!inti) | ||
121 | return -ENOMEM; | ||
122 | |||
123 | inti->type = KVM_S390_SIGP_STOP; | ||
124 | |||
125 | spin_lock_bh(&fi->lock); | ||
126 | li = fi->local_int[cpu_addr]; | ||
127 | if (li == NULL) { | ||
128 | rc = 3; /* not operational */ | ||
129 | kfree(inti); | ||
130 | goto unlock; | ||
131 | } | ||
132 | spin_lock_bh(&li->lock); | ||
133 | list_add_tail(&inti->list, &li->list); | ||
134 | atomic_set(&li->active, 1); | ||
135 | atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags); | ||
136 | if (store) | ||
137 | li->action_bits |= ACTION_STORE_ON_STOP; | ||
138 | li->action_bits |= ACTION_STOP_ON_STOP; | ||
139 | if (waitqueue_active(&li->wq)) | ||
140 | wake_up_interruptible(&li->wq); | ||
141 | spin_unlock_bh(&li->lock); | ||
142 | rc = 0; /* order accepted */ | ||
143 | unlock: | ||
144 | spin_unlock_bh(&fi->lock); | ||
145 | VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr); | ||
146 | return rc; | ||
147 | } | ||
148 | |||
149 | static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter) | ||
150 | { | ||
151 | int rc; | ||
152 | |||
153 | switch (parameter & 0xff) { | ||
154 | case 0: | ||
155 | printk(KERN_WARNING "kvm: request to switch to ESA/390 mode" | ||
156 | " not supported"); | ||
157 | rc = 3; /* not operational */ | ||
158 | break; | ||
159 | case 1: | ||
160 | case 2: | ||
161 | rc = 0; /* order accepted */ | ||
162 | break; | ||
163 | default: | ||
164 | rc = -ENOTSUPP; | ||
165 | } | ||
166 | return rc; | ||
167 | } | ||
168 | |||
169 | static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address, | ||
170 | u64 *reg) | ||
171 | { | ||
172 | struct float_interrupt *fi = &vcpu->kvm->arch.float_int; | ||
173 | struct local_interrupt *li; | ||
174 | struct interrupt_info *inti; | ||
175 | int rc; | ||
176 | u8 tmp; | ||
177 | |||
178 | /* make sure that the new value is valid memory */ | ||
179 | address = address & 0x7fffe000u; | ||
180 | if ((copy_from_guest(vcpu, &tmp, | ||
181 | (u64) (address + vcpu->kvm->arch.guest_origin) , 1)) || | ||
182 | (copy_from_guest(vcpu, &tmp, (u64) (address + | ||
183 | vcpu->kvm->arch.guest_origin + PAGE_SIZE), 1))) { | ||
184 | *reg |= SIGP_STAT_INVALID_PARAMETER; | ||
185 | return 1; /* invalid parameter */ | ||
186 | } | ||
187 | |||
188 | inti = kzalloc(sizeof(*inti), GFP_KERNEL); | ||
189 | if (!inti) | ||
190 | return 2; /* busy */ | ||
191 | |||
192 | spin_lock_bh(&fi->lock); | ||
193 | li = fi->local_int[cpu_addr]; | ||
194 | |||
195 | if ((cpu_addr >= KVM_MAX_VCPUS) || (li == NULL)) { | ||
196 | rc = 1; /* incorrect state */ | ||
197 | *reg &= SIGP_STAT_INCORRECT_STATE; | ||
198 | kfree(inti); | ||
199 | goto out_fi; | ||
200 | } | ||
201 | |||
202 | spin_lock_bh(&li->lock); | ||
203 | /* cpu must be in stopped state */ | ||
204 | if (atomic_read(li->cpuflags) & CPUSTAT_RUNNING) { | ||
205 | rc = 1; /* incorrect state */ | ||
206 | *reg &= SIGP_STAT_INCORRECT_STATE; | ||
207 | kfree(inti); | ||
208 | goto out_li; | ||
209 | } | ||
210 | |||
211 | inti->type = KVM_S390_SIGP_SET_PREFIX; | ||
212 | inti->prefix.address = address; | ||
213 | |||
214 | list_add_tail(&inti->list, &li->list); | ||
215 | atomic_set(&li->active, 1); | ||
216 | if (waitqueue_active(&li->wq)) | ||
217 | wake_up_interruptible(&li->wq); | ||
218 | rc = 0; /* order accepted */ | ||
219 | |||
220 | VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address); | ||
221 | out_li: | ||
222 | spin_unlock_bh(&li->lock); | ||
223 | out_fi: | ||
224 | spin_unlock_bh(&fi->lock); | ||
225 | return rc; | ||
226 | } | ||
227 | |||
228 | int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu) | ||
229 | { | ||
230 | int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; | ||
231 | int r3 = vcpu->arch.sie_block->ipa & 0x000f; | ||
232 | int base2 = vcpu->arch.sie_block->ipb >> 28; | ||
233 | int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16); | ||
234 | u32 parameter; | ||
235 | u16 cpu_addr = vcpu->arch.guest_gprs[r3]; | ||
236 | u8 order_code; | ||
237 | int rc; | ||
238 | |||
239 | order_code = disp2; | ||
240 | if (base2) | ||
241 | order_code += vcpu->arch.guest_gprs[base2]; | ||
242 | |||
243 | if (r1 % 2) | ||
244 | parameter = vcpu->arch.guest_gprs[r1]; | ||
245 | else | ||
246 | parameter = vcpu->arch.guest_gprs[r1 + 1]; | ||
247 | |||
248 | switch (order_code) { | ||
249 | case SIGP_SENSE: | ||
250 | vcpu->stat.instruction_sigp_sense++; | ||
251 | rc = __sigp_sense(vcpu, cpu_addr, | ||
252 | &vcpu->arch.guest_gprs[r1]); | ||
253 | break; | ||
254 | case SIGP_EMERGENCY: | ||
255 | vcpu->stat.instruction_sigp_emergency++; | ||
256 | rc = __sigp_emergency(vcpu, cpu_addr); | ||
257 | break; | ||
258 | case SIGP_STOP: | ||
259 | vcpu->stat.instruction_sigp_stop++; | ||
260 | rc = __sigp_stop(vcpu, cpu_addr, 0); | ||
261 | break; | ||
262 | case SIGP_STOP_STORE_STATUS: | ||
263 | vcpu->stat.instruction_sigp_stop++; | ||
264 | rc = __sigp_stop(vcpu, cpu_addr, 1); | ||
265 | break; | ||
266 | case SIGP_SET_ARCH: | ||
267 | vcpu->stat.instruction_sigp_arch++; | ||
268 | rc = __sigp_set_arch(vcpu, parameter); | ||
269 | break; | ||
270 | case SIGP_SET_PREFIX: | ||
271 | vcpu->stat.instruction_sigp_prefix++; | ||
272 | rc = __sigp_set_prefix(vcpu, cpu_addr, parameter, | ||
273 | &vcpu->arch.guest_gprs[r1]); | ||
274 | break; | ||
275 | case SIGP_RESTART: | ||
276 | vcpu->stat.instruction_sigp_restart++; | ||
277 | /* user space must know about restart */ | ||
278 | default: | ||
279 | return -ENOTSUPP; | ||
280 | } | ||
281 | |||
282 | if (rc < 0) | ||
283 | return rc; | ||
284 | |||
285 | vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44); | ||
286 | vcpu->arch.sie_block->gpsw.mask |= (rc & 3ul) << 44; | ||
287 | return 0; | ||
288 | } | ||
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index fd072013f88c..5c1aea97cd12 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c | |||
@@ -30,11 +30,27 @@ | |||
30 | #define TABLES_PER_PAGE 4 | 30 | #define TABLES_PER_PAGE 4 |
31 | #define FRAG_MASK 15UL | 31 | #define FRAG_MASK 15UL |
32 | #define SECOND_HALVES 10UL | 32 | #define SECOND_HALVES 10UL |
33 | |||
/*
 * Initialize a page table page that also carries page status table
 * extensions (pgstes).  The 4k page holds two 256-entry page tables,
 * each directly followed by its pgste area: the pte quarters are
 * filled with empty entries, the pgste quarters are zeroed.
 */
void clear_table_pgstes(unsigned long *table)
{
	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/4);	/* ptes 0-255 */
	memset(table + 256, 0, PAGE_SIZE/4);			/* their pgstes */
	clear_table(table + 512, _PAGE_TYPE_EMPTY, PAGE_SIZE/4);/* ptes 256-511 */
	memset(table + 768, 0, PAGE_SIZE/4);			/* their pgstes */
}
41 | |||
33 | #else | 42 | #else |
34 | #define ALLOC_ORDER 2 | 43 | #define ALLOC_ORDER 2 |
35 | #define TABLES_PER_PAGE 2 | 44 | #define TABLES_PER_PAGE 2 |
36 | #define FRAG_MASK 3UL | 45 | #define FRAG_MASK 3UL |
37 | #define SECOND_HALVES 2UL | 46 | #define SECOND_HALVES 2UL |
47 | |||
/*
 * 31 bit variant: one page table in the first half of the page, its
 * page status table extension (pgste) area in the second half.
 */
void clear_table_pgstes(unsigned long *table)
{
	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);	/* ptes */
	memset(table + 256, 0, PAGE_SIZE/2);			/* pgstes */
}
53 | |||
38 | #endif | 54 | #endif |
39 | 55 | ||
40 | unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec) | 56 | unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec) |
@@ -153,7 +169,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm) | |||
153 | unsigned long *table; | 169 | unsigned long *table; |
154 | unsigned long bits; | 170 | unsigned long bits; |
155 | 171 | ||
156 | bits = mm->context.noexec ? 3UL : 1UL; | 172 | bits = (mm->context.noexec || mm->context.pgstes) ? 3UL : 1UL; |
157 | spin_lock(&mm->page_table_lock); | 173 | spin_lock(&mm->page_table_lock); |
158 | page = NULL; | 174 | page = NULL; |
159 | if (!list_empty(&mm->context.pgtable_list)) { | 175 | if (!list_empty(&mm->context.pgtable_list)) { |
@@ -170,7 +186,10 @@ unsigned long *page_table_alloc(struct mm_struct *mm) | |||
170 | pgtable_page_ctor(page); | 186 | pgtable_page_ctor(page); |
171 | page->flags &= ~FRAG_MASK; | 187 | page->flags &= ~FRAG_MASK; |
172 | table = (unsigned long *) page_to_phys(page); | 188 | table = (unsigned long *) page_to_phys(page); |
173 | clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE); | 189 | if (mm->context.pgstes) |
190 | clear_table_pgstes(table); | ||
191 | else | ||
192 | clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE); | ||
174 | spin_lock(&mm->page_table_lock); | 193 | spin_lock(&mm->page_table_lock); |
175 | list_add(&page->lru, &mm->context.pgtable_list); | 194 | list_add(&page->lru, &mm->context.pgtable_list); |
176 | } | 195 | } |
@@ -191,7 +210,7 @@ void page_table_free(struct mm_struct *mm, unsigned long *table) | |||
191 | struct page *page; | 210 | struct page *page; |
192 | unsigned long bits; | 211 | unsigned long bits; |
193 | 212 | ||
194 | bits = mm->context.noexec ? 3UL : 1UL; | 213 | bits = (mm->context.noexec || mm->context.pgstes) ? 3UL : 1UL; |
195 | bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long); | 214 | bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long); |
196 | page = pfn_to_page(__pa(table) >> PAGE_SHIFT); | 215 | page = pfn_to_page(__pa(table) >> PAGE_SHIFT); |
197 | spin_lock(&mm->page_table_lock); | 216 | spin_lock(&mm->page_table_lock); |
@@ -228,3 +247,43 @@ void disable_noexec(struct mm_struct *mm, struct task_struct *tsk) | |||
228 | mm->context.noexec = 0; | 247 | mm->context.noexec = 0; |
229 | update_mm(mm, tsk); | 248 | update_mm(mm, tsk); |
230 | } | 249 | } |
250 | |||
251 | /* | ||
252 | * switch on pgstes for its userspace process (for kvm) | ||
253 | */ | ||
254 | int s390_enable_sie(void) | ||
255 | { | ||
256 | struct task_struct *tsk = current; | ||
257 | struct mm_struct *mm; | ||
258 | int rc; | ||
259 | |||
260 | task_lock(tsk); | ||
261 | |||
262 | rc = 0; | ||
263 | if (tsk->mm->context.pgstes) | ||
264 | goto unlock; | ||
265 | |||
266 | rc = -EINVAL; | ||
267 | if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 || | ||
268 | tsk->mm != tsk->active_mm || tsk->mm->ioctx_list) | ||
269 | goto unlock; | ||
270 | |||
271 | tsk->mm->context.pgstes = 1; /* dirty little tricks .. */ | ||
272 | mm = dup_mm(tsk); | ||
273 | tsk->mm->context.pgstes = 0; | ||
274 | |||
275 | rc = -ENOMEM; | ||
276 | if (!mm) | ||
277 | goto unlock; | ||
278 | mmput(tsk->mm); | ||
279 | tsk->mm = tsk->active_mm = mm; | ||
280 | preempt_disable(); | ||
281 | update_mm(mm, tsk); | ||
282 | cpu_set(smp_processor_id(), mm->cpu_vm_mask); | ||
283 | preempt_enable(); | ||
284 | rc = 0; | ||
285 | unlock: | ||
286 | task_unlock(tsk); | ||
287 | return rc; | ||
288 | } | ||
289 | EXPORT_SYMBOL_GPL(s390_enable_sie); | ||